From f203cd3923cac792adb56e240012245e3748b4b2 Mon Sep 17 00:00:00 2001
From: sebhoss
Date: Fri, 16 Feb 2024 10:07:44 +0000
Subject: [PATCH] Update upstream specifications to their latest version

---
 .../v1/redisenterpriseclusters.yaml | 3 +
 .../redisenterpriseactiveactivedatabases.yaml | 6 +
 .../v1alpha1/redisenterpriseclusters.yaml | 2 +
 .../v1alpha1/redisenterprisedatabases.yaml | 6 +
 .../v1/aerospikeclusters.yaml | 1 +
 .../v1beta1/aerospikeclusters.yaml | 1 +
 .../camel.apache.org/v1/integrationkits.yaml | 169 +
 .../camel.apache.org/v1/integrations.yaml | 1096 +++++
 .../argoproj.io/v1alpha1/argocds.yaml | 2 +-
 .../argoproj.io/v1beta1/argocds.yaml | 12 +-
 .../v1alpha1/apis.yaml | 20 +-
 .../v1alpha1/authorizers.yaml | 24 +-
 .../v1alpha1/deployments.yaml | 22 +-
 .../v1alpha1/integrations.yaml | 28 +-
 .../v1alpha1/routes.yaml | 32 +-
 .../v1alpha1/stages.yaml | 26 +-
 .../v1alpha1/vpclinks.yaml | 18 +-
 .../v1alpha1/scalabletargets.yaml | 32 +-
 .../v1alpha1/scalingpolicies.yaml | 36 +-
 .../v1alpha1/backups.yaml | 22 +-
 .../v1alpha1/globaltables.yaml | 20 +-
 .../v1alpha1/tables.yaml | 72 +-
 .../v1alpha1/dhcpoptions.yaml | 24 +-
 .../v1alpha1/elasticipaddresses.yaml | 30 +-
 .../v1alpha1/instances.yaml | 106 +-
 .../v1alpha1/internetgateways.yaml | 30 +-
 .../v1alpha1/natgateways.yaml | 42 +-
 .../v1alpha1/routetables.yaml | 36 +-
 .../v1alpha1/securitygroups.yaml | 32 +-
 .../v1alpha1/subnets.yaml | 44 +-
 .../v1alpha1/transitgateways.yaml | 20 +-
 .../v1alpha1/vpcendpoints.yaml | 50 +-
 .../ec2.services.k8s.aws/v1alpha1/vpcs.yaml | 36 +-
 .../v1alpha1/pullthroughcacherules.yaml | 24 +-
 .../v1alpha1/repositories.yaml | 36 +-
 .../v1alpha1/jobruns.yaml | 22 +-
 .../v1alpha1/virtualclusters.yaml | 18 +-
 .../iam.services.k8s.aws/v1alpha1/groups.yaml | 30 +-
 .../v1alpha1/policies.yaml | 40 +-
 .../iam.services.k8s.aws/v1alpha1/roles.yaml | 46 +-
 .../v1alpha1/aliases.yaml | 24 +-
 .../kms.services.k8s.aws/v1alpha1/grants.yaml | 38 +-
 .../kms.services.k8s.aws/v1alpha1/keys.yaml | 62 +-
 .../mq.services.k8s.aws/v1alpha1/brokers.yaml | 36 +-
 .../v1alpha1/domains.yaml | 66 +-
 .../v1alpha1/alertmanagerdefinitions.yaml | 20 +-
 .../v1alpha1/rulegroupsnamespaces.yaml | 20 +-
 .../v1alpha1/workspaces.yaml | 18 +-
 .../v1alpha1/dbclusterparametergroups.yaml | 30 +-
 .../v1alpha1/dbclusters.yaml | 192 +-
 .../v1alpha1/dbinstances.yaml | 238 +-
 .../v1alpha1/dbparametergroups.yaml | 26 +-
 .../v1alpha1/dbproxies.yaml | 40 +-
 .../v1alpha1/dbsubnetgroups.yaml | 34 +-
 .../v1alpha1/globalclusters.yaml | 30 +-
 .../s3.services.k8s.aws/v1alpha1/buckets.yaml | 146 +-
 .../v1alpha1/apps.yaml | 22 +-
 .../v1alpha1/dataqualityjobdefinitions.yaml | 28 +-
 .../v1alpha1/domains.yaml | 44 +-
 .../v1alpha1/endpointconfigs.yaml | 38 +-
 .../v1alpha1/endpoints.yaml | 46 +-
 .../v1alpha1/featuregroups.yaml | 40 +-
 .../v1alpha1/hyperparametertuningjobs.yaml | 108 +-
 .../v1alpha1/modelbiasjobdefinitions.yaml | 28 +-
 .../modelexplainabilityjobdefinitions.yaml | 30 +-
 .../v1alpha1/modelpackagegroups.yaml | 22 +-
 .../v1alpha1/modelpackages.yaml | 64 +-
 .../v1alpha1/modelqualityjobdefinitions.yaml | 26 +-
 .../v1alpha1/models.yaml | 38 +-
 .../v1alpha1/monitoringschedules.yaml | 36 +-
 .../notebookinstancelifecycleconfigs.yaml | 24 +-
 .../v1alpha1/notebookinstances.yaml | 46 +-
 .../v1alpha1/processingjobs.yaml | 48 +-
 .../v1alpha1/trainingjobs.yaml | 84 +-
 .../v1alpha1/transformjobs.yaml | 46 +-
 .../v1alpha1/userprofiles.yaml | 36 +-
 .../v1alpha1/activities.yaml | 22 +-
 .../v1alpha1/statemachines.yaml | 28 +-
 .../v1beta1/ec2nodeclasses.yaml | 3 +
.../karpenter.sh/v1beta1/nodepools.yaml | 2 +- .../v1/snapshotschedules.yaml | 22 +- .../claudie.io/v1beta1/inputmanifests.yaml | 6 +- .../cert-manager.io/v1/clusterissuers.yaml | 10 +- .../cert-manager.io/v1/issuers.yaml | 10 +- .../v1alpha1/bundles.yaml | 62 +- .../v2alpha1/ciliumbgppeeringpolicies.yaml | 25 +- .../v1beta1/cryostats.yaml | 19 +- .../v1beta1/clustersecretstores.yaml | 74 + .../v1beta1/secretstores.yaml | 74 + .../loki/loki.grafana.com/v1/lokistacks.yaml | 7 + .../v5/teleportroles.yaml | 44 + .../v6/teleportroles.yaml | 44 + .../app.terraform.io/v1alpha2/agentpools.yaml | 210 +- .../app.terraform.io/v1alpha2/modules.yaml | 2 +- .../app.terraform.io/v1alpha2/workspaces.yaml | 22 +- .../v1beta1/hcpvaultsecretsapps.yaml | 14 +- .../v1beta1/vaultdynamicsecrets.yaml | 8 - .../v1beta1/vaultpkisecrets.yaml | 13 +- .../v1beta1/vaultstaticsecrets.yaml | 8 - .../v1alpha1/managementcenters.yaml | 4 + .../v1alpha3/destinationrules.yaml | 12 + .../v1alpha3/gateways.yaml | 3 + .../v1alpha3/sidecars.yaml | 3 + .../v1beta1/destinationrules.yaml | 12 + .../networking.istio.io/v1beta1/gateways.yaml | 3 + .../networking.istio.io/v1beta1/sidecars.yaml | 3 + .../v1alpha3/vsphereclusteridentities.yaml | 112 + .../v1alpha3/vsphereclusters.yaml | 325 ++ .../v1alpha3/vspheredeploymentzones.yaml | 96 + .../v1alpha3/vspherefailuredomains.yaml | 123 + .../v1alpha3/vspheremachines.yaml | 304 ++ .../v1alpha3/vspheremachinetemplates.yaml | 265 + .../v1alpha3/vspherevms.yaml | 307 ++ .../v1alpha4/vsphereclusteridentities.yaml | 112 + .../v1alpha4/vsphereclusters.yaml | 145 + .../v1alpha4/vsphereclustertemplates.yaml | 86 + .../v1alpha4/vspheredeploymentzones.yaml | 96 + .../v1alpha4/vspherefailuredomains.yaml | 123 + .../v1alpha4/vspheremachines.yaml | 304 ++ .../v1alpha4/vspheremachinetemplates.yaml | 225 + .../v1alpha4/vspherevms.yaml | 307 ++ .../v1alpha3/clusterresourcesetbindings.yaml | 83 + .../v1alpha3/clusterresourcesets.yaml | 135 + .../cluster.x-k8s.io/v1alpha3/clusters.yaml | 217 + .../v1alpha3/machinedeployments.yaml | 326 ++ .../v1alpha3/machinehealthchecks.yaml | 207 + .../v1alpha3/machinepools.yaml | 347 ++ .../cluster.x-k8s.io/v1alpha3/machines.yaml | 243 + .../v1alpha3/machinesets.yaml | 288 ++ .../jobset.x-k8s.io/v1alpha2/jobsets.yaml | 1435 +++--- .../v1beta1/admissionchecks.yaml | 24 +- .../kueue.x-k8s.io/v1beta1/clusterqueues.yaml | 97 +- .../kueue.x-k8s.io/v1beta1/localqueues.yaml | 28 +- .../v1beta1/resourceflavors.yaml | 30 +- .../kueue.x-k8s.io/v1beta1/workloads.yaml | 1397 +++--- .../forklift.konveyor.io/v1beta1/hosts.yaml | 3 - .../mariadb.mmontes.io/v1alpha1/mariadbs.yaml | 24 +- .../v1alpha1/flowcollectors.yaml | 4 +- .../v1beta1/flowcollectors.yaml | 12 +- .../v1beta2/flowcollectors.yaml | 24 +- .../v1alpha1/opentelemetrycollectors.yaml | 3 +- .../v1/infrastructures.yaml | 6 + .../v1/dnsrecords.yaml | 1 + .../v1/controllerconfigs.yaml | 6 + .../v1/clustercsidrivers.yaml | 2 +- .../v1/ingresscontrollers.yaml | 1 + .../v1/clusterdeployments.yaml | 18 - .../v1/clusterdeprovisions.yaml | 22 - .../hive.openshift.io/v1/clusterpools.yaml | 18 - .../hive.openshift.io/v1/machinepools.yaml | 26 - .../v1/felixconfigurations.yaml | 2 +- .../v1alpha1/contourconfigurations.yaml | 7 +- .../v1alpha1/contourdeployments.yaml | 7 +- .../v1/servicemonitors.yaml | 2 +- .../quay.redhat.com/v1/quayregistries.yaml | 22 + .../rook/ceph.rook.io/v1/cephblockpools.yaml | 5 +- .../rook/ceph.rook.io/v1/cephclusters.yaml | 2 + .../rook/ceph.rook.io/v1/cephfilesystems.yaml | 6 + 
.../ceph.rook.io/v1/cephobjectstores.yaml | 6 + .../rook/ceph.rook.io/v1/cephobjectzones.yaml | 6 + .../gloo/gateway.solo.io/v1/gateways.yaml | 10 + .../gloo/gateway.solo.io/v1/httpgateways.yaml | 5 + .../gloo/gloo.solo.io/v1/settings.yaml | 5 + .../wildfly.org/v1alpha1/wildflyservers.yaml | 225 + .../acid.zalan.do/v1/postgresqls.yaml | 5 + .../src/addons_cluster_x_k8s_io/mod.rs | 1 + .../v1alpha3/clusterresourcesetbindings.rs | 55 + .../v1alpha3/clusterresourcesets.rs | 124 + .../addons_cluster_x_k8s_io/v1alpha3/mod.rs | 2 + .../v1alpha1/apis.rs | 43 +- .../v1alpha1/authorizers.rs | 67 +- .../v1alpha1/deployments.rs | 62 +- .../v1alpha1/routes.rs | 113 +- .../v1alpha1/stages.rs | 87 +- .../v1alpha1/vpclinks.rs | 35 +- .../v1/redisenterpriseclusters.rs | 3 + .../redisenterpriseactiveactivedatabases.rs | 6 + .../v1alpha1/redisenterpriseclusters.rs | 2 + .../v1alpha1/redisenterprisedatabases.rs | 6 + .../app_terraform_io/v1alpha2/agentpools.rs | 194 +- .../src/app_terraform_io/v1alpha2/modules.rs | 2 +- .../app_terraform_io/v1alpha2/workspaces.rs | 22 +- .../v1alpha1/scalabletargets.rs | 325 +- .../v1alpha1/scalingpolicies.rs | 420 +- .../src/argoproj_io/v1alpha1/argocds.rs | 4 +- .../src/argoproj_io/v1beta1/argocds.rs | 10 +- .../src/ceph_rook_io/v1/cephblockpools.rs | 7 +- .../src/ceph_rook_io/v1/cephfilesystems.rs | 6 + .../src/ceph_rook_io/v1/cephobjectstores.rs | 6 + .../src/ceph_rook_io/v1/cephobjectzones.rs | 6 + .../src/cert_manager_io/v1/clusterissuers.rs | 6 +- .../src/cert_manager_io/v1/issuers.rs | 6 +- .../src/cilium_io/v2/ciliumendpoints.rs | 2 +- .../v2alpha1/ciliumbgppeeringpolicies.rs | 5 +- .../v2alpha1/ciliumendpointslices.rs | 2 +- .../src/claudie_io/v1beta1/inputmanifests.rs | 9 +- .../src/cluster_x_k8s_io/mod.rs | 1 + .../src/cluster_x_k8s_io/v1alpha3/clusters.rs | 231 + .../v1alpha3/machinedeployments.rs | 439 ++ .../v1alpha3/machinehealthchecks.rs | 185 + .../cluster_x_k8s_io/v1alpha3/machinepools.rs | 490 ++ .../src/cluster_x_k8s_io/v1alpha3/machines.rs | 310 ++ .../cluster_x_k8s_io/v1alpha3/machinesets.rs | 398 ++ .../src/cluster_x_k8s_io/v1alpha3/mod.rs | 6 + .../config_openshift_io/v1/infrastructures.rs | 4 + .../v1alpha1/backups.rs | 52 +- .../v1alpha1/globaltables.rs | 53 +- .../v1alpha1/tables.rs | 446 +- .../v1alpha1/dhcpoptions.rs | 55 +- .../v1alpha1/elasticipaddresses.rs | 65 +- .../v1alpha1/instances.rs | 389 +- .../v1alpha1/internetgateways.rs | 84 +- .../v1alpha1/natgateways.rs | 175 +- .../v1alpha1/routetables.rs | 101 +- .../v1alpha1/securitygroups.rs | 111 +- .../ec2_services_k8s_aws/v1alpha1/subnets.rs | 136 +- .../v1alpha1/transitgateways.rs | 39 +- .../v1alpha1/vpcendpoints.rs | 155 +- .../src/ec2_services_k8s_aws/v1alpha1/vpcs.rs | 94 +- .../v1alpha1/pullthroughcacherules.rs | 45 +- .../v1alpha1/repositories.rs | 77 +- .../v1alpha1/jobruns.rs | 63 +- .../v1alpha1/virtualclusters.rs | 41 +- .../v1beta1/clustersecretstores.rs | 93 + .../v1beta1/secretstores.rs | 93 + .../v1alpha1/flowcollectors.rs | 2 +- .../v1beta1/flowcollectors.rs | 14 +- .../v1beta2/flowcollectors.rs | 38 +- .../src/forklift_konveyor_io/v1beta1/hosts.rs | 3 - .../v1alpha1/managementcenters.rs | 3 + .../v1/clusterdeployments.rs | 21 - .../v1/clusterdeprovisions.rs | 24 - .../src/hive_openshift_io/v1/clusterpools.rs | 21 - .../src/hive_openshift_io/v1/machinepools.rs | 34 - .../iam_services_k8s_aws/v1alpha1/groups.rs | 100 +- .../iam_services_k8s_aws/v1alpha1/policies.rs | 181 +- .../iam_services_k8s_aws/v1alpha1/roles.rs | 216 +- .../infrastructure_cluster_x_k8s_io/mod.rs | 2 + 
.../v1alpha3/mod.rs | 7 + .../v1alpha3/vsphereclusteridentities.rs | 83 + .../v1alpha3/vsphereclusters.rs | 319 ++ .../v1alpha3/vspheredeploymentzones.rs | 70 + .../v1alpha3/vspherefailuredomains.rs | 98 + .../v1alpha3/vspheremachines.rs | 221 + .../v1alpha3/vspheremachinetemplates.rs | 211 + .../v1alpha3/vspherevms.rs | 249 + .../v1alpha4/mod.rs | 8 + .../v1alpha4/vsphereclusteridentities.rs | 83 + .../v1alpha4/vsphereclusters.rs | 100 + .../v1alpha4/vsphereclustertemplates.rs | 64 + .../v1alpha4/vspheredeploymentzones.rs | 70 + .../v1alpha4/vspherefailuredomains.rs | 98 + .../v1alpha4/vspheremachines.rs | 221 + .../v1alpha4/vspheremachinetemplates.rs | 172 + .../v1alpha4/vspherevms.rs | 249 + .../src/jobset_x_k8s_io/v1alpha2/jobsets.rs | 4280 +++++++++++++---- .../v1beta1/ec2nodeclasses.rs | 3 + .../kms_services_k8s_aws/v1alpha1/aliases.rs | 98 +- .../kms_services_k8s_aws/v1alpha1/grants.rs | 240 +- .../src/kms_services_k8s_aws/v1alpha1/keys.rs | 442 +- .../kueue_x_k8s_io/v1beta1/admissionchecks.rs | 74 +- .../kueue_x_k8s_io/v1beta1/clusterqueues.rs | 408 +- .../src/kueue_x_k8s_io/v1beta1/localqueues.rs | 77 +- .../kueue_x_k8s_io/v1beta1/resourceflavors.rs | 71 +- .../src/kueue_x_k8s_io/v1beta1/workloads.rs | 4151 ++++++++++++---- kube-custom-resources-rs/src/lib.rs | 31 + .../src/loki_grafana_com/v1/lokistacks.rs | 14 + .../v1/controllerconfigs.rs | 4 + .../mariadb_mmontes_io/v1alpha1/mariadbs.rs | 36 +- .../v1/servicemonitors.rs | 3 +- .../mq_services_k8s_aws/v1alpha1/brokers.rs | 116 +- .../v1alpha1/domains.rs | 145 +- .../v1alpha1/opentelemetrycollectors.rs | 4 +- .../operator_cryostat_io/v1beta1/cryostats.rs | 14 +- .../v1alpha1/contourconfigurations.rs | 26 +- .../v1alpha1/contourdeployments.rs | 30 +- .../v1alpha1/alertmanagerdefinitions.rs | 55 +- .../v1alpha1/rulegroupsnamespaces.rs | 55 +- .../v1alpha1/workspaces.rs | 32 +- .../src/quay_redhat_com/v1/quayregistries.rs | 14 + .../v1alpha1/dbclusterparametergroups.rs | 181 +- .../v1alpha1/dbclusters.rs | 1419 ++++-- .../v1alpha1/dbinstances.rs | 2489 +++++++--- .../v1alpha1/dbparametergroups.rs | 166 +- .../v1alpha1/dbproxies.rs | 91 +- .../v1alpha1/dbsubnetgroups.rs | 143 +- .../v1alpha1/globalclusters.rs | 63 +- .../s3_services_k8s_aws/v1alpha1/buckets.rs | 557 ++- .../v1alpha1/apps.rs | 72 +- .../v1alpha1/dataqualityjobdefinitions.rs | 78 +- .../v1alpha1/domains.rs | 145 +- .../v1alpha1/endpointconfigs.rs | 147 +- .../v1alpha1/endpoints.rs | 182 +- .../v1alpha1/featuregroups.rs | 227 +- .../v1alpha1/hyperparametertuningjobs.rs | 788 ++- .../v1alpha1/modelbiasjobdefinitions.rs | 75 +- .../modelexplainabilityjobdefinitions.rs | 81 +- .../v1alpha1/modelpackagegroups.rs | 57 +- .../v1alpha1/modelpackages.rs | 227 +- .../v1alpha1/modelqualityjobdefinitions.rs | 72 +- .../v1alpha1/models.rs | 129 +- .../v1alpha1/monitoringschedules.rs | 106 +- .../notebookinstancelifecycleconfigs.rs | 90 +- .../v1alpha1/notebookinstances.rs | 132 +- .../v1alpha1/processingjobs.rs | 154 +- .../v1alpha1/trainingjobs.rs | 586 ++- .../v1alpha1/transformjobs.rs | 181 +- .../v1alpha1/userprofiles.rs | 105 +- .../v1beta1/hcpvaultsecretsapps.rs | 9 +- .../v1beta1/vaultdynamicsecrets.rs | 6 +- .../v1beta1/vaultpkisecrets.rs | 9 +- .../v1beta1/vaultstaticsecrets.rs | 6 +- .../v1alpha1/activities.rs | 92 +- .../v1alpha1/statemachines.rs | 112 +- .../v1/snapshotschedules.rs | 27 +- .../trust_cert_manager_io/v1alpha1/bundles.rs | 134 +- .../wildfly_org/v1alpha1/wildflyservers.rs | 195 + 317 files changed, 33509 insertions(+), 8529 deletions(-) create mode 100644 
crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusteridentities.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusters.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheredeploymentzones.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherefailuredomains.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachines.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachinetemplates.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherevms.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusteridentities.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusters.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclustertemplates.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheredeploymentzones.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherefailuredomains.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachines.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachinetemplates.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherevms.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesetbindings.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesets.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/clusters.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinedeployments.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinehealthchecks.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinepools.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machines.yaml create mode 100644 crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinesets.yaml create mode 100644 kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesetbindings.rs create mode 100644 kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesets.rs create mode 100644 kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/mod.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/clusters.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinedeployments.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinehealthchecks.rs 
create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinepools.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machines.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinesets.rs create mode 100644 kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/mod.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/mod.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusteridentities.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusters.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheredeploymentzones.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherefailuredomains.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachines.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachinetemplates.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherevms.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/mod.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusteridentities.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusters.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclustertemplates.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheredeploymentzones.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherefailuredomains.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachines.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachinetemplates.rs create mode 100644 kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherevms.rs diff --git a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1/redisenterpriseclusters.yaml b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1/redisenterpriseclusters.yaml index 2ace31026..aabd3621b 100644 --- a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1/redisenterpriseclusters.yaml +++ b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1/redisenterpriseclusters.yaml @@ -3410,6 +3410,9 @@ spec: - "major" - "latest" type: "string" + resp3Default: + description: "Whether databases will turn on RESP3 compatibility upon database upgrade. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info." 
+ type: "boolean" serviceAccountName: description: "Name of the service account to use" type: "string" diff --git a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseactiveactivedatabases.yaml b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseactiveactivedatabases.yaml index 5f263ec0f..1db8fce70 100644 --- a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseactiveactivedatabases.yaml +++ b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseactiveactivedatabases.yaml @@ -461,6 +461,9 @@ spec: replication: description: "In-memory database replication. When enabled, database will have replica shard for every master - leading to higher availability." type: "boolean" + resp3: + description: "Whether this database supports RESP3 protocol. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info." + type: "boolean" rofRamSize: description: "The size of the RAM portion of an RoF database. Similarly to \"memorySize\" use formats like 100MB, 0.1GB It must be at least 10% of combined memory size (RAM+Flash), as specified by \"memorySize\"." type: "string" @@ -487,6 +490,9 @@ spec: shardCount: description: "Number of database server-side shards" type: "integer" + shardingEnabled: + description: "Toggles database sharding for REAADBs (Active Active databases) and enabled by default. This field is blocked for REDB (non-Active Active databases) and sharding is toggled via the shardCount field - when shardCount is 1 this is disabled otherwise enabled." + type: "boolean" shardsPlacement: description: "Control the density of shards - should they reside on as few or as many nodes as possible. Available options are \"dense\" or \"sparse\". If left unset, defaults to \"dense\"." type: "string" diff --git a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseclusters.yaml b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseclusters.yaml index cf2accb2c..e5bdf315f 100644 --- a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseclusters.yaml +++ b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterpriseclusters.yaml @@ -3292,6 +3292,8 @@ spec: - "major" - "latest" type: "string" + resp3Default: + type: "boolean" serviceAccountName: type: "string" services: diff --git a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterprisedatabases.yaml b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterprisedatabases.yaml index 458a6022f..efeede9a8 100644 --- a/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterprisedatabases.yaml +++ b/crd-catalog/RedisLabs/redis-enterprise-k8s-docs/app.redislabs.com/v1alpha1/redisenterprisedatabases.yaml @@ -397,6 +397,9 @@ spec: replication: description: "In-memory database replication. When enabled, database will have replica shard for every master - leading to higher availability." type: "boolean" + resp3: + description: "Whether this database supports RESP3 protocol. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info." 
+ type: "boolean" rofRamSize: description: "The size of the RAM portion of an RoF database. Similarly to \"memorySize\" use formats like 100MB, 0.1GB. It must be at least 10% of combined memory size (RAM and Flash), as specified by \"memorySize\"." type: "string" @@ -423,6 +426,9 @@ spec: shardCount: description: "Number of database server-side shards" type: "integer" + shardingEnabled: + description: "Toggles database sharding for REAADBs (Active Active databases) and enabled by default. This field is blocked for REDB (non-Active Active databases) and sharding is toggled via the shardCount field - when shardCount is 1 this is disabled otherwise enabled." + type: "boolean" shardsPlacement: description: "Control the density of shards - should they reside on as few or as many nodes as possible. Available options are \"dense\" or \"sparse\". If left unset, defaults to \"dense\"." enum: diff --git a/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1/aerospikeclusters.yaml b/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1/aerospikeclusters.yaml index 226540934..283ff418b 100644 --- a/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1/aerospikeclusters.yaml +++ b/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1/aerospikeclusters.yaml @@ -2,6 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: + aerospike-kubernetes-operator/version: "3.2.1" controller-gen.kubebuilder.io/version: "v0.12.1" name: "aerospikeclusters.asdb.aerospike.com" spec: diff --git a/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1beta1/aerospikeclusters.yaml b/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1beta1/aerospikeclusters.yaml index f3ef96645..fcb470837 100644 --- a/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1beta1/aerospikeclusters.yaml +++ b/crd-catalog/aerospike/aerospike-kubernetes-operator/asdb.aerospike.com/v1beta1/aerospikeclusters.yaml @@ -2,6 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: + aerospike-kubernetes-operator/version: "3.2.1" controller-gen.kubebuilder.io/version: "v0.12.1" name: "aerospikeclusters.asdb.aerospike.com" spec: diff --git a/crd-catalog/apache/camel-k/camel.apache.org/v1/integrationkits.yaml b/crd-catalog/apache/camel-k/camel.apache.org/v1/integrationkits.yaml index 06f4e5c34..4c96e7b6b 100644 --- a/crd-catalog/apache/camel-k/camel.apache.org/v1/integrationkits.yaml +++ b/crd-catalog/apache/camel-k/camel.apache.org/v1/integrationkits.yaml @@ -431,6 +431,175 @@ spec: runtimeVersion: description: "the runtime version for which this kit was configured" type: "string" + traits: + description: "generated traits executed by the kit" + properties: + addons: + additionalProperties: + description: "AddonTrait represents the configuration of an addon trait." + type: "object" + x-kubernetes-preserve-unknown-fields: true + description: "The collection of addon trait configurations" + type: "object" + builder: + description: "The builder trait is internally used to determine the best strategy to build and configure IntegrationKits." + properties: + annotations: + additionalProperties: + type: "string" + description: "When using `pod` strategy, annotation to use for the builder pod." 
+ type: "object" + baseImage: + description: "Specify a base image" + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + incrementalImageBuild: + description: "Use the incremental image build option, to reuse existing containers (default `true`)" + type: "boolean" + limitCPU: + description: "When using `pod` strategy, the maximum amount of CPU required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + limitMemory: + description: "When using `pod` strategy, the maximum amount of memory required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + mavenProfiles: + description: "A list of references pointing to configmaps/secrets that contains a maven profile. The content of the maven profile is expected to be a text containing a valid maven profile starting with `` and ending with `` that will be integrated as an inline profile in the POM. Syntax: [configmap|secret]:name[/key], where name represents the resource name, key optionally represents the resource key to be filtered (default key value = profile.xml)." + items: + type: "string" + type: "array" + nodeSelector: + additionalProperties: + type: "string" + description: "Defines a set of nodes the builder pod is eligible to be scheduled on, based on labels on the node." + type: "object" + orderStrategy: + description: "The build order strategy to use, either `dependencies`, `fifo` or `sequential` (default `sequential`)" + enum: + - "dependencies" + - "fifo" + - "sequential" + type: "string" + properties: + description: "A list of properties to be provided to the build task" + items: + type: "string" + type: "array" + requestCPU: + description: "When using `pod` strategy, the minimum amount of CPU required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + requestMemory: + description: "When using `pod` strategy, the minimum amount of memory required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + strategy: + description: "The strategy to use, either `pod` or `routine` (default `routine`)" + enum: + - "pod" + - "routine" + type: "string" + tasks: + description: "A list of tasks to be executed (available only when using `pod` strategy) with format `;;`." + items: + type: "string" + type: "array" + tasksFilter: + description: "A list of tasks sorted by the order of execution in a csv format, ie, `,,...`. Mind that you must include also the operator tasks (`builder`, `quarkus-native`, `package`, `jib`, `spectrum`, `s2i`) if you need to execute them. Useful only with `pod` strategy." + type: "string" + tasksLimitCPU: + description: "A list of limit cpu configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksLimitMemory: + description: "A list of limit memory configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksRequestCPU: + description: "A list of request cpu configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksRequestMemory: + description: "A list of request memory configuration for the specific task with format `:`." 
+ items: + type: "string" + type: "array" + verbose: + description: "Enable verbose logging on build components that support it (e.g. Kaniko build pod). Deprecated no longer in use" + type: "boolean" + type: "object" + camel: + description: "The Camel trait sets up Camel configuration." + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + properties: + description: "A list of properties to be provided to the Integration runtime" + items: + type: "string" + type: "array" + runtimeVersion: + description: "The camel-k-runtime version to use for the integration. It overrides the default version set in the Integration Platform." + type: "string" + type: "object" + quarkus: + description: "The Quarkus trait configures the Quarkus runtime. It's enabled by default. NOTE: Compiling to a native executable, requires at least 4GiB of memory, so the Pod running the native build must have enough memory available." + properties: + buildMode: + description: "The Quarkus mode to run: either `jvm` or `native` (default `jvm`). In case both `jvm` and `native` are specified, two `IntegrationKit` resources are created, with the `native` kit having precedence over the `jvm` one once ready." + items: + description: "QuarkusMode is the type of Quarkus build packaging." + enum: + - "jvm" + - "native" + type: "string" + type: "array" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + nativeBaseImage: + description: "The base image to use when running a native build (default `quay.io/quarkus/quarkus-micro-image:2.0`)" + type: "string" + nativeBuilderImage: + description: "The image containing the tooling required for a native build (by default it will use the one provided in the runtime catalog)" + type: "string" + packageTypes: + description: "The Quarkus package types, `fast-jar` or `native` (default `fast-jar`). In case both `fast-jar` and `native` are specified, two `IntegrationKit` resources are created, with the native kit having precedence over the `fast-jar` one once ready. The order influences the resolution of the current kit for the integration. The kit corresponding to the first package type will be assigned to the integration in case no existing kit that matches the integration exists. Deprecated: use `build-mode` instead." + items: + description: "QuarkusPackageType is the type of Quarkus build packaging. Deprecated: use `QuarkusMode` instead." + enum: + - "fast-jar" + - "native" + type: "string" + type: "array" + type: "object" + registry: + description: "The Registry trait sets up Maven to use the Image registry as a Maven repository." + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." 
+ type: "boolean" + type: "object" + type: "object" version: description: "the Camel K operator version for which this kit was configured" type: "string" diff --git a/crd-catalog/apache/camel-k/camel.apache.org/v1/integrations.yaml b/crd-catalog/apache/camel-k/camel.apache.org/v1/integrations.yaml index d4f3685c4..cbce31018 100644 --- a/crd-catalog/apache/camel-k/camel.apache.org/v1/integrations.yaml +++ b/crd-catalog/apache/camel-k/camel.apache.org/v1/integrations.yaml @@ -5060,6 +5060,1102 @@ spec: selector: description: "label selector" type: "string" + traits: + description: "the traits generated and executed for this Integration" + properties: + 3scale: + description: "Deprecated: for backward compatibility." + properties: + configuration: + description: "TraitConfiguration parameters configuration" + type: "object" + x-kubernetes-preserve-unknown-fields: true + required: + - "configuration" + type: "object" + addons: + additionalProperties: + description: "AddonTrait represents the configuration of an addon trait." + type: "object" + x-kubernetes-preserve-unknown-fields: true + description: "The extension point with addon traits" + type: "object" + affinity: + description: "The configuration of Affinity trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + nodeAffinityLabels: + description: "Defines a set of nodes the integration pod(s) are eligible to be scheduled on, based on labels on the node." + items: + type: "string" + type: "array" + podAffinity: + description: "Always co-locates multiple replicas of the integration in the same node (default `false`)." + type: "boolean" + podAffinityLabels: + description: "Defines a set of pods (namely those matching the label selector, relative to the given namespace) that the integration pod(s) should be co-located with." + items: + type: "string" + type: "array" + podAntiAffinity: + description: "Never co-locates multiple replicas of the integration in the same node (default `false`)." + type: "boolean" + podAntiAffinityLabels: + description: "Defines a set of pods (namely those matching the label selector, relative to the given namespace) that the integration pod(s) should not be co-located with." + items: + type: "string" + type: "array" + type: "object" + builder: + description: "The configuration of Builder trait" + properties: + annotations: + additionalProperties: + type: "string" + description: "When using `pod` strategy, annotation to use for the builder pod." + type: "object" + baseImage: + description: "Specify a base image" + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + incrementalImageBuild: + description: "Use the incremental image build option, to reuse existing containers (default `true`)" + type: "boolean" + limitCPU: + description: "When using `pod` strategy, the maximum amount of CPU required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + limitMemory: + description: "When using `pod` strategy, the maximum amount of memory required by the pod builder. 
Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + mavenProfiles: + description: "A list of references pointing to configmaps/secrets that contains a maven profile. The content of the maven profile is expected to be a text containing a valid maven profile starting with `` and ending with `` that will be integrated as an inline profile in the POM. Syntax: [configmap|secret]:name[/key], where name represents the resource name, key optionally represents the resource key to be filtered (default key value = profile.xml)." + items: + type: "string" + type: "array" + nodeSelector: + additionalProperties: + type: "string" + description: "Defines a set of nodes the builder pod is eligible to be scheduled on, based on labels on the node." + type: "object" + orderStrategy: + description: "The build order strategy to use, either `dependencies`, `fifo` or `sequential` (default `sequential`)" + enum: + - "dependencies" + - "fifo" + - "sequential" + type: "string" + properties: + description: "A list of properties to be provided to the build task" + items: + type: "string" + type: "array" + requestCPU: + description: "When using `pod` strategy, the minimum amount of CPU required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + requestMemory: + description: "When using `pod` strategy, the minimum amount of memory required by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`." + type: "string" + strategy: + description: "The strategy to use, either `pod` or `routine` (default `routine`)" + enum: + - "pod" + - "routine" + type: "string" + tasks: + description: "A list of tasks to be executed (available only when using `pod` strategy) with format `;;`." + items: + type: "string" + type: "array" + tasksFilter: + description: "A list of tasks sorted by the order of execution in a csv format, ie, `,,...`. Mind that you must include also the operator tasks (`builder`, `quarkus-native`, `package`, `jib`, `spectrum`, `s2i`) if you need to execute them. Useful only with `pod` strategy." + type: "string" + tasksLimitCPU: + description: "A list of limit cpu configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksLimitMemory: + description: "A list of limit memory configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksRequestCPU: + description: "A list of request cpu configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + tasksRequestMemory: + description: "A list of request memory configuration for the specific task with format `:`." + items: + type: "string" + type: "array" + verbose: + description: "Enable verbose logging on build components that support it (e.g. Kaniko build pod). Deprecated no longer in use" + type: "boolean" + type: "object" + camel: + description: "The configuration of Camel trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + properties: + description: "A list of properties to be provided to the Integration runtime" + items: + type: "string" + type: "array" + runtimeVersion: + description: "The camel-k-runtime version to use for the integration. It overrides the default version set in the Integration Platform." 
+ type: "string" + type: "object" + container: + description: "The configuration of Container trait" + properties: + auto: + description: "To automatically enable the trait" + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + expose: + description: "Can be used to enable/disable exposure via kubernetes Service." + type: "boolean" + image: + description: "The main container image" + type: "string" + imagePullPolicy: + description: "The pull policy: Always|Never|IfNotPresent" + enum: + - "Always" + - "Never" + - "IfNotPresent" + type: "string" + limitCPU: + description: "The maximum amount of CPU required." + type: "string" + limitMemory: + description: "The maximum amount of memory required." + type: "string" + name: + description: "The main container name. It's named `integration` by default." + type: "string" + port: + description: "To configure a different port exposed by the container (default `8080`)." + type: "integer" + portName: + description: "To configure a different port name for the port exposed by the container. It defaults to `http` only when the `expose` parameter is true." + type: "string" + requestCPU: + description: "The minimum amount of CPU required." + type: "string" + requestMemory: + description: "The minimum amount of memory required." + type: "string" + servicePort: + description: "To configure under which service port the container port is to be exposed (default `80`)." + type: "integer" + servicePortName: + description: "To configure under which service port name the container port is to be exposed (default `http`)." + type: "string" + type: "object" + cron: + description: "The configuration of Cron trait" + properties: + activeDeadlineSeconds: + description: "Specifies the duration in seconds, relative to the start time, that the job may be continuously active before it is considered to be failed. It defaults to 60s." + format: "int64" + type: "integer" + auto: + description: "Automatically deploy the integration as CronJob when all routes are either starting from a periodic consumer (only `cron`, `timer` and `quartz` are supported) or a passive consumer (e.g. `direct` is a passive consumer). \n It's required that all periodic consumers have the same period, and it can be expressed as cron schedule (e.g. `1m` can be expressed as `0/1 * * * *`, while `35m` or `50s` cannot)." + type: "boolean" + backoffLimit: + description: "Specifies the number of retries before marking the job failed. It defaults to 2." + format: "int32" + type: "integer" + components: + description: "A comma separated list of the Camel components that need to be customized in order for them to work when the schedule is triggered externally by Kubernetes. A specific customizer is activated for each specified component. E.g. for the `timer` component, the `cron-timer` customizer is activated (it's present in the `org.apache.camel.k:camel-k-cron` library). \n Supported components are currently: `cron`, `timer` and `quartz`." + type: "string" + concurrencyPolicy: + description: "Specifies how to treat concurrent executions of a Job. 
Valid values are: - \"Allow\": allows CronJobs to run concurrently; - \"Forbid\" (default): forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one" + enum: + - "Allow" + - "Forbid" + - "Replace" + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + fallback: + description: "Use the default Camel implementation of the `cron` endpoint (`quartz`) instead of trying to materialize the integration as Kubernetes CronJob." + type: "boolean" + schedule: + description: "The CronJob schedule for the whole integration. If multiple routes are declared, they must have the same schedule for this mechanism to work correctly." + type: "string" + startingDeadlineSeconds: + description: "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones." + format: "int64" + type: "integer" + type: "object" + dependencies: + description: "The configuration of Dependencies trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + type: "object" + deployer: + description: "The configuration of Deployer trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + kind: + description: "Allows to explicitly select the desired deployment kind between `deployment`, `cron-job` or `knative-service` when creating the resources for running the integration." + enum: + - "deployment" + - "cron-job" + - "knative-service" + type: "string" + useSSA: + description: "Use server-side apply to update the owned resources (default `true`). Note that it automatically falls back to client-side patching, if SSA is not available, e.g., on old Kubernetes clusters." + type: "boolean" + type: "object" + deployment: + description: "The configuration of Deployment trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + progressDeadlineSeconds: + description: "The maximum time in seconds for the deployment to make progress before it is considered to be failed. It defaults to `60s`." + format: "int32" + type: "integer" + rollingUpdateMaxSurge: + description: "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to `25%`." + type: "integer" + rollingUpdateMaxUnavailable: + description: "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to `25%`." + type: "integer" + strategy: + description: "The deployment strategy to use to replace existing pods with new ones." + enum: + - "Recreate" + - "RollingUpdate" + type: "string" + type: "object" + environment: + description: "The configuration of Environment trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + containerMeta: + description: "Enables injection of `NAMESPACE` and `POD_NAME` environment variables (default `true`)" + type: "boolean" + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + httpProxy: + description: "Propagates the `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables (default `true`)" + type: "boolean" + vars: + description: "A list of environment variables to be added to the integration container. The syntax is KEY=VALUE, e.g., `MY_VAR=\"my value\"`. These take precedence over the previously defined environment variables." + items: + type: "string" + type: "array" + type: "object" + error-handler: + description: "The configuration of Error Handler trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + ref: + description: "The error handler ref name provided or found in application properties" + type: "string" + type: "object" + gc: + description: "The configuration of GC trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + discoveryCache: + description: "Discovery client cache to be used, either `disabled`, `disk` or `memory` (default `memory`). Deprecated: to be removed from trait configuration." + enum: + - "disabled" + - "disk" + - "memory" + type: "string" + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + type: "object" + health: + description: "The configuration of Health trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + livenessFailureThreshold: + description: "Minimum consecutive failures for the liveness probe to be considered failed after having succeeded." + format: "int32" + type: "integer" + livenessInitialDelay: + description: "Number of seconds after the container has started before the liveness probe is initiated." + format: "int32" + type: "integer" + livenessPeriod: + description: "How often to perform the liveness probe." + format: "int32" + type: "integer" + livenessProbeEnabled: + description: "Configures the liveness probe for the integration container (default `false`)." + type: "boolean" + livenessScheme: + description: "Scheme to use when connecting to the liveness probe (default `HTTP`)." 
+ type: "string" + livenessSuccessThreshold: + description: "Minimum consecutive successes for the liveness probe to be considered successful after having failed." + format: "int32" + type: "integer" + livenessTimeout: + description: "Number of seconds after which the liveness probe times out." + format: "int32" + type: "integer" + readinessFailureThreshold: + description: "Minimum consecutive failures for the readiness probe to be considered failed after having succeeded." + format: "int32" + type: "integer" + readinessInitialDelay: + description: "Number of seconds after the container has started before the readiness probe is initiated." + format: "int32" + type: "integer" + readinessPeriod: + description: "How often to perform the readiness probe." + format: "int32" + type: "integer" + readinessProbeEnabled: + description: "Configures the readiness probe for the integration container (default `true`)." + type: "boolean" + readinessScheme: + description: "Scheme to use when connecting to the readiness probe (default `HTTP`)." + type: "string" + readinessSuccessThreshold: + description: "Minimum consecutive successes for the readiness probe to be considered successful after having failed." + format: "int32" + type: "integer" + readinessTimeout: + description: "Number of seconds after which the readiness probe times out." + format: "int32" + type: "integer" + startupFailureThreshold: + description: "Minimum consecutive failures for the startup probe to be considered failed after having succeeded." + format: "int32" + type: "integer" + startupInitialDelay: + description: "Number of seconds after the container has started before the startup probe is initiated." + format: "int32" + type: "integer" + startupPeriod: + description: "How often to perform the startup probe." + format: "int32" + type: "integer" + startupProbeEnabled: + description: "Configures the startup probe for the integration container (default `false`)." + type: "boolean" + startupScheme: + description: "Scheme to use when connecting to the startup probe (default `HTTP`)." + type: "string" + startupSuccessThreshold: + description: "Minimum consecutive successes for the startup probe to be considered successful after having failed." + format: "int32" + type: "integer" + startupTimeout: + description: "Number of seconds after which the startup probe times out." + format: "int32" + type: "integer" + type: "object" + ingress: + description: "The configuration of Ingress trait" + properties: + annotations: + additionalProperties: + type: "string" + description: "The annotations added to the ingress. This can be used to set controller specific annotations, e.g., when using the NGINX Ingress controller: See https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md" + type: "object" + auto: + description: "To automatically add an ingress whenever the integration uses an HTTP endpoint consumer." + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + host: + description: "To configure the host exposed by the ingress." + type: "string" + path: + description: "To configure the path exposed by the ingress (default `/`)." + type: "string" + pathType: + description: "To configure the path type exposed by the ingress. 
One of `Exact`, `Prefix`, `ImplementationSpecific` (default to `Prefix`)." + enum: + - "Exact" + - "Prefix" + - "ImplementationSpecific" + type: "string" + type: "object" + istio: + description: "The configuration of Istio trait" + properties: + allow: + description: "Configures a (comma-separated) list of CIDR subnets that should not be intercepted by the Istio proxy (`10.0.0.0/8,172.16.0.0/12,192.168.0.0/16` by default)." + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + inject: + description: "Forces the value for labels `sidecar.istio.io/inject`. By default the label is set to `true` on deployment and not set on Knative Service." + type: "boolean" + type: "object" + jolokia: + description: "The configuration of Jolokia trait" + properties: + CACert: + description: "The PEM encoded CA certification file path, used to verify client certificates, applicable when `protocol` is `https` and `use-ssl-client-authentication` is `true` (default `/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt` for OpenShift)." + type: "string" + clientPrincipal: + description: "The principal(s) which must be given in a client certificate to allow access to the Jolokia endpoint, applicable when `protocol` is `https` and `use-ssl-client-authentication` is `true` (default `clientPrincipal=cn=system:master-proxy`, `cn=hawtio-online.hawtio.svc` and `cn=fuse-console.fuse.svc` for OpenShift)." + items: + type: "string" + type: "array" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + discoveryEnabled: + description: "Listen for multicast requests (default `false`)" + type: "boolean" + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + extendedClientCheck: + description: "Mandate the client certificate contains a client flag in the extended key usage section, applicable when `protocol` is `https` and `use-ssl-client-authentication` is `true` (default `true` for OpenShift)." + type: "boolean" + host: + description: "The Host address to which the Jolokia agent should bind to. If `\"\\*\"` or `\"0.0.0.0\"` is given, the servers binds to every network interface (default `\"*\"`)." + type: "string" + options: + description: "A list of additional Jolokia options as defined in https://jolokia.org/reference/html/agents.html#agent-jvm-config[JVM agent configuration options]" + items: + type: "string" + type: "array" + password: + description: "The password used for authentication, applicable when the `user` option is set." + type: "string" + port: + description: "The Jolokia endpoint port (default `8778`)." + type: "integer" + protocol: + description: "The protocol to use, either `http` or `https` (default `https` for OpenShift)" + type: "string" + useSSLClientAuthentication: + description: "Whether client certificates should be used for authentication (default `true` for OpenShift)." 
+ type: "boolean" + user: + description: "The user to be used for authentication" + type: "string" + type: "object" + jvm: + description: "The configuration of JVM trait" + properties: + classpath: + description: "Additional JVM classpath (use `Linux` classpath separator)" + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + debug: + description: "Activates remote debugging, so that a debugger can be attached to the JVM, e.g., using port-forwarding" + type: "boolean" + debugAddress: + description: "Transport address at which to listen for the newly launched JVM (default `*:5005`)" + type: "string" + debugSuspend: + description: "Suspends the target JVM immediately before the main class is loaded" + type: "boolean" + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + options: + description: "A list of JVM options" + items: + type: "string" + type: "array" + printCommand: + description: "Prints the command used the start the JVM in the container logs (default `true`)" + type: "boolean" + type: "object" + kamelets: + description: "The configuration of Kamelets trait" + properties: + auto: + description: "Automatically inject all referenced Kamelets and their default configuration (enabled by default)" + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + list: + description: "Comma separated list of Kamelet names to load into the current integration" + type: "string" + mountPoint: + description: "The directory where the application mounts and reads Kamelet spec (default `/etc/camel/kamelets`)" + type: "string" + type: "object" + keda: + description: "Deprecated: for backward compatibility." + properties: + configuration: + description: "TraitConfiguration parameters configuration" + type: "object" + x-kubernetes-preserve-unknown-fields: true + required: + - "configuration" + type: "object" + knative: + description: "The configuration of Knative trait" + properties: + auto: + description: "Enable automatic discovery of all trait properties." + type: "boolean" + channelSinks: + description: "List of channels used as destination of integration routes. Can contain simple channel names or full Camel URIs." + items: + type: "string" + type: "array" + channelSources: + description: "List of channels used as source of integration routes. Can contain simple channel names or full Camel URIs." + items: + type: "string" + type: "array" + config: + description: "Can be used to inject a Knative complete configuration in JSON format." + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + endpointSinks: + description: "List of endpoints used as destination of integration routes. Can contain simple endpoint names or full Camel URIs." + items: + type: "string" + type: "array" + endpointSources: + description: "List of channels used as source of integration routes." 
+ items: + type: "string" + type: "array" + eventSinks: + description: "List of event types that the integration will produce. Can contain simple event types or full Camel URIs (to use a specific broker)." + items: + type: "string" + type: "array" + eventSources: + description: "List of event types that the integration will be subscribed to. Can contain simple event types or full Camel URIs (to use a specific broker different from \"default\")." + items: + type: "string" + type: "array" + filterSourceChannels: + description: "Enables filtering on events based on the header \"ce-knativehistory\". Since this header has been removed in newer versions of Knative, filtering is disabled by default." + type: "boolean" + namespaceLabel: + description: "Enables the camel-k-operator to set the \"bindings.knative.dev/include=true\" label to the namespace As Knative requires this label to perform injection of K_SINK URL into the service. If this is false, the integration pod may start and fail, read the SinkBinding Knative documentation. (default: true)" + type: "boolean" + sinkBinding: + description: "Allows binding the integration to a sink via a Knative SinkBinding resource. This can be used when the integration targets a single sink. It's enabled by default when the integration targets a single sink (except when the integration is owned by a Knative source)." + type: "boolean" + type: "object" + knative-service: + description: "The configuration of Knative Service trait" + properties: + annotations: + additionalProperties: + type: "string" + description: "The annotations added to route. This can be used to set knative service specific annotations CLI usage example: -t \"knative-service.annotations.'haproxy.router.openshift.io/balance'=true\"" + type: "object" + auto: + description: "Automatically deploy the integration as Knative service when all conditions hold: \n * Integration is using the Knative profile * All routes are either starting from an HTTP based consumer or a passive consumer (e.g. `direct` is a passive consumer)" + type: "boolean" + autoscalingMetric: + description: "Configures the Knative autoscaling metric property (e.g. to set `concurrency` based or `cpu` based autoscaling). \n Refer to the Knative documentation for more information." + type: "string" + autoscalingTarget: + description: "Sets the allowed concurrency level or CPU percentage (depending on the autoscaling metric) for each Pod. \n Refer to the Knative documentation for more information." + type: "integer" + class: + description: "Configures the Knative autoscaling class property (e.g. to set `hpa.autoscaling.knative.dev` or `kpa.autoscaling.knative.dev` autoscaling). \n Refer to the Knative documentation for more information." + enum: + - "kpa.autoscaling.knative.dev" + - "hpa.autoscaling.knative.dev" + type: "string" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + maxScale: + description: "An upper bound for the number of Pods that can be running in parallel for the integration. Knative has its own cap value that depends on the installation. \n Refer to the Knative documentation for more information." + type: "integer" + minScale: + description: "The minimum number of Pods that should be running at any time for the integration. 
It's **zero** by default, meaning that the integration is scaled down to zero when not used for a configured amount of time. \n Refer to the Knative documentation for more information." + type: "integer" + rolloutDuration: + description: "Enables to gradually shift traffic to the latest Revision and sets the rollout duration. It's disabled by default and must be expressed as a Golang `time.Duration` string representation, rounded to a second precision." + type: "string" + visibility: + description: "Setting `cluster-local`, Knative service becomes a private service. Specifically, this option applies the `networking.knative.dev/visibility` label to Knative service. \n Refer to the Knative documentation for more information." + enum: + - "cluster-local" + type: "string" + type: "object" + logging: + description: "The configuration of Logging trait" + properties: + color: + description: "Colorize the log output" + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + format: + description: "Logs message format" + type: "string" + json: + description: "Output the logs in JSON" + type: "boolean" + jsonPrettyPrint: + description: "Enable \"pretty printing\" of the JSON logs" + type: "boolean" + level: + description: "Adjust the logging level (defaults to `INFO`)" + enum: + - "FATAL" + - "WARN" + - "INFO" + - "DEBUG" + - "TRACE" + type: "string" + type: "object" + master: + description: "Deprecated: for backward compatibility." + properties: + configuration: + description: "TraitConfiguration parameters configuration" + type: "object" + x-kubernetes-preserve-unknown-fields: true + required: + - "configuration" + type: "object" + mount: + description: "The configuration of Mount trait" + properties: + configs: + description: "A list of configuration pointing to configmap/secret. The configuration are expected to be UTF-8 resources as they are processed by runtime Camel Context and tried to be parsed as property files. They are also made available on the classpath in order to ease their usage directly from the Route. Syntax: [configmap|secret]:name[/key], where name represents the resource name and key optionally represents the resource key to be filtered" + items: + type: "string" + type: "array" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + hotReload: + description: "Enable \"hot reload\" when a secret/configmap mounted is edited (default `false`). The configmap/secret must be marked with `camel.apache.org/integration` label to be taken in account." + type: "boolean" + resources: + description: "A list of resources (text or binary content) pointing to configmap/secret. The resources are expected to be any resource type (text or binary content). The destination path can be either a default location or any path specified by the user. Syntax: [configmap|secret]:name[/key][@path], where name represents the resource name, key optionally represents the resource key to be filtered and path represents the destination path" + items: + type: "string" + type: "array" + volumes: + description: "A list of Persistent Volume Claims to be mounted. 
Syntax: [pvcname:/container/path]" + items: + type: "string" + type: "array" + type: "object" + openapi: + description: "The configuration of OpenAPI trait" + properties: + configmaps: + description: "The configmaps holding the spec of the OpenAPI" + items: + type: "string" + type: "array" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + type: "object" + owner: + description: "The configuration of Owner trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + targetAnnotations: + description: "The set of annotations to be transferred" + items: + type: "string" + type: "array" + targetLabels: + description: "The set of labels to be transferred" + items: + type: "string" + type: "array" + type: "object" + pdb: + description: "The configuration of PDB trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + maxUnavailable: + description: "The number of pods for the Integration that can be unavailable after an eviction. It can be either an absolute number or a percentage (default `1` if `min-available` is also not set). Only one of `max-unavailable` and `min-available` can be specified." + type: "string" + minAvailable: + description: "The number of pods for the Integration that must still be available after an eviction. It can be either an absolute number or a percentage. Only one of `min-available` and `max-unavailable` can be specified." + type: "string" + type: "object" + platform: + description: "The configuration of Platform trait" + properties: + auto: + description: "To automatically detect from the environment if a default platform can be created (it will be created on OpenShift only)." + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + createDefault: + description: "To create a default (empty) platform when the platform is missing." + type: "boolean" + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + global: + description: "Indicates if the platform should be created globally in the case of global operator (default true)." + type: "boolean" + type: "object" + pod: + description: "The configuration of Pod trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + type: "object" + prometheus: + description: "The configuration of Prometheus trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." 
+ type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + podMonitor: + description: "Whether a `PodMonitor` resource is created (default `true`)." + type: "boolean" + podMonitorLabels: + description: "The `PodMonitor` resource labels, applicable when `pod-monitor` is `true`." + items: + type: "string" + type: "array" + type: "object" + pull-secret: + description: "The configuration of Pull Secret trait" + properties: + auto: + description: "Automatically configures the platform registry secret on the pod if it is of type `kubernetes.io/dockerconfigjson`." + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + imagePullerDelegation: + description: "When using a global operator with a shared platform, this enables delegation of the `system:image-puller` cluster role on the operator namespace to the integration service account." + type: "boolean" + secretName: + description: "The pull secret name to set on the Pod. If left empty this is automatically taken from the `IntegrationPlatform` registry configuration." + type: "string" + type: "object" + quarkus: + description: "The configuration of Quarkus trait" + properties: + buildMode: + description: "The Quarkus mode to run: either `jvm` or `native` (default `jvm`). In case both `jvm` and `native` are specified, two `IntegrationKit` resources are created, with the `native` kit having precedence over the `jvm` one once ready." + items: + description: "QuarkusMode is the type of Quarkus build packaging." + enum: + - "jvm" + - "native" + type: "string" + type: "array" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Deprecated: no longer in use." + type: "boolean" + nativeBaseImage: + description: "The base image to use when running a native build (default `quay.io/quarkus/quarkus-micro-image:2.0`)" + type: "string" + nativeBuilderImage: + description: "The image containing the tooling required for a native build (by default it will use the one provided in the runtime catalog)" + type: "string" + packageTypes: + description: "The Quarkus package types, `fast-jar` or `native` (default `fast-jar`). In case both `fast-jar` and `native` are specified, two `IntegrationKit` resources are created, with the native kit having precedence over the `fast-jar` one once ready. The order influences the resolution of the current kit for the integration. The kit corresponding to the first package type will be assigned to the integration in case no existing kit that matches the integration exists. Deprecated: use `build-mode` instead." + items: + description: "QuarkusPackageType is the type of Quarkus build packaging. Deprecated: use `QuarkusMode` instead." + enum: + - "fast-jar" + - "native" + type: "string" + type: "array" + type: "object" + registry: + description: "The configuration of Registry trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." 
+ type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + type: "object" + route: + description: "The configuration of Route trait" + properties: + annotations: + additionalProperties: + type: "string" + description: "The annotations added to route. This can be used to set route specific annotations For annotations options see https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#route-specific-annotations CLI usage example: -t \"route.annotations.'haproxy.router.openshift.io/balance'=true\"" + type: "object" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + host: + description: "To configure the host exposed by the route." + type: "string" + tlsCACertificate: + description: "The TLS CA certificate contents. \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsCACertificateSecret: + description: "The secret name and key reference to the TLS CA certificate. The format is \"secret-name[/key-name]\", the value represents the secret name, if there is only one key in the secret it will be read, otherwise you can set a key name separated with a \"/\". \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsCertificate: + description: "The TLS certificate contents. \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsCertificateSecret: + description: "The secret name and key reference to the TLS certificate. The format is \"secret-name[/key-name]\", the value represents the secret name, if there is only one key in the secret it will be read, otherwise you can set a key name separated with a \"/\". \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsDestinationCACertificate: + description: "The destination CA certificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsDestinationCACertificateSecret: + description: "The secret name and key reference to the destination CA certificate. The format is \"secret-name[/key-name]\", the value represents the secret name, if there is only one key in the secret it will be read, otherwise you can set a key name separated with a \"/\". \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsInsecureEdgeTerminationPolicy: + description: "To configure how to deal with insecure traffic, e.g. `Allow`, `Disable` or `Redirect` traffic. \n Refer to the OpenShift route documentation for additional information." + enum: + - "None" + - "Allow" + - "Redirect" + type: "string" + tlsKey: + description: "The TLS certificate key contents. 
\n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsKeySecret: + description: "The secret name and key reference to the TLS certificate key. The format is \"secret-name[/key-name]\", the value represents the secret name, if there is only one key in the secret it will be read, otherwise you can set a key name separated with a \"/\". \n Refer to the OpenShift route documentation for additional information." + type: "string" + tlsTermination: + description: "The TLS termination type, like `edge`, `passthrough` or `reencrypt`. \n Refer to the OpenShift route documentation for additional information." + enum: + - "edge" + - "reencrypt" + - "passthrough" + type: "string" + type: "object" + service: + description: "The configuration of Service trait" + properties: + auto: + description: "To automatically detect from the code if a Service needs to be created." + type: "boolean" + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + nodePort: + description: "Enable Service to be exposed as NodePort (default `false`). Deprecated: Use service type instead." + type: "boolean" + type: + description: "The type of service to be used, either 'ClusterIP', 'NodePort' or 'LoadBalancer'." + enum: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" + type: "string" + type: "object" + service-binding: + description: "The configuration of Service Binding trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + services: + description: "List of Services in the form [[apigroup/]version:]kind:[namespace/]name" + items: + type: "string" + type: "array" + type: "object" + strimzi: + description: "Deprecated: for backward compatibility." + properties: + configuration: + description: "TraitConfiguration parameters configuration" + type: "object" + x-kubernetes-preserve-unknown-fields: true + required: + - "configuration" + type: "object" + toleration: + description: "The configuration of Toleration trait" + properties: + configuration: + description: "Legacy trait configuration parameters. Deprecated: for backward compatibility." + type: "object" + x-kubernetes-preserve-unknown-fields: true + enabled: + description: "Can be used to enable or disable a trait. All traits share this common property." + type: "boolean" + taints: + description: "The list of taints to tolerate, in the form `Key[=Value]:Effect[:Seconds]`" + items: + type: "string" + type: "array" + type: "object" + tracing: + description: "Deprecated: for backward compatibility." 
+ properties: + configuration: + description: "TraitConfiguration parameters configuration" + type: "object" + x-kubernetes-preserve-unknown-fields: true + required: + - "configuration" + type: "object" + type: "object" version: description: "the operator version" type: "string" diff --git a/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1alpha1/argocds.yaml b/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1alpha1/argocds.yaml index 6ff62b507..8f26ad975 100644 --- a/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1alpha1/argocds.yaml +++ b/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1alpha1/argocds.yaml @@ -523,7 +523,7 @@ spec: description: "GATrackingID is the google analytics tracking ID to use." type: "string" grafana: - description: "Grafana defines the Grafana server options for ArgoCD." + description: "Deprecated: Grafana defines the Grafana server options for ArgoCD." properties: enabled: description: "Enabled will toggle Grafana support globally for ArgoCD." diff --git a/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1beta1/argocds.yaml b/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1beta1/argocds.yaml index 0b191f5ae..28dff2ccb 100644 --- a/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1beta1/argocds.yaml +++ b/crd-catalog/argoproj-labs/argocd-operator/argoproj.io/v1beta1/argocds.yaml @@ -166,9 +166,19 @@ spec: description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" + scmProviders: + description: "SCMProviders defines the list of allowed custom SCM provider API URLs" + items: + type: "string" + type: "array" scmRootCAConfigMap: description: "SCMRootCAConfigMap is the name of the config map that stores the Gitlab SCM Provider's TLS certificate which will be mounted on the ApplicationSet Controller (optional)." type: "string" + sourceNamespaces: + description: "SourceNamespaces defines the namespaces applicationset resources are allowed to be created in" + items: + type: "string" + type: "array" version: description: "Version is the Argo CD ApplicationSet image tag. (optional)" type: "string" @@ -472,7 +482,7 @@ spec: description: "GATrackingID is the google analytics tracking ID to use." type: "string" grafana: - description: "Grafana defines the Grafana server options for ArgoCD." + description: "Deprecated: Grafana defines the Grafana server options for ArgoCD." properties: enabled: description: "Enabled will toggle Grafana support globally for ArgoCD." 
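The hunk above introduces scmProviders and sourceNamespaces alongside the existing scmRootCAConfigMap in the ApplicationSet controller options. For illustration only, a manifest exercising these fields might look like the following minimal sketch; it assumes the resource kind is ArgoCD and that the three fields sit under spec.applicationSet (inferred from the surrounding ApplicationSet context), and every name, URL, and namespace shown is hypothetical:

apiVersion: argoproj.io/v1beta1
kind: ArgoCD                                  # assumed kind for argocds.yaml
metadata:
  name: example-argocd                        # hypothetical name
  namespace: argocd                           # hypothetical namespace
spec:
  applicationSet:                             # assumed location of the new fields
    scmProviders:                             # allow-list of custom SCM provider API URLs
      - "https://gitlab.example.com"          # hypothetical URL
    sourceNamespaces:                         # namespaces where ApplicationSet resources may be created
      - "team-a"
      - "team-b"
    scmRootCAConfigMap: "gitlab-scm-root-ca"  # config map holding the GitLab SCM provider's TLS certificate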
diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/apis.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/apis.yaml index 40960a801..e1980a670 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/apis.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/apis.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "apis.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "API is the Schema for the APIS API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ApiSpec defines the desired state of Api. \n Represents an API." + description: "ApiSpec defines the desired state of Api.\n\n\nRepresents an API." properties: apiKeySelectionExpression: type: "string" @@ -36,7 +36,7 @@ spec: body: type: "string" corsConfiguration: - description: "Represents a CORS configuration. Supported only for HTTP APIs. See Configuring CORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html) for more information." + description: "Represents a CORS configuration. Supported only for HTTP APIs. See Configuring\nCORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html)\nfor more information." properties: allowCredentials: type: "boolean" @@ -96,13 +96,13 @@ spec: description: "APIStatus defines the observed state of API" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -118,9 +118,9 @@ spec: apiID: type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/authorizers.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/authorizers.yaml index 2665a0c06..5f3d36b47 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/authorizers.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/authorizers.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "authorizers.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,23 +19,23 @@ spec: description: "Authorizer is the Schema for the Authorizers API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "AuthorizerSpec defines the desired state of Authorizer. \n Represents an authorizer." + description: "AuthorizerSpec defines the desired state of Authorizer.\n\n\nRepresents an authorizer." properties: apiID: type: "string" apiRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -61,7 +61,7 @@ spec: identityValidationExpression: type: "string" jwtConfiguration: - description: "Represents the configuration of a JWT authorizer. Required for the JWT authorizer type. Supported only for HTTP APIs." + description: "Represents the configuration of a JWT authorizer. Required for the JWT authorizer\ntype. Supported only for HTTP APIs." properties: audience: items: @@ -82,13 +82,13 @@ spec: description: "AuthorizerStatus defines the observed state of Authorizer" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -100,9 +100,9 @@ spec: authorizerID: type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/deployments.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/deployments.yaml index 64fd19eb1..df615ead5 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/deployments.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/deployments.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "deployments.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,23 +19,23 @@ spec: description: "Deployment is the Schema for the Deployments API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DeploymentSpec defines the desired state of Deployment. \n An immutable representation of an API that can be called by users. A Deployment must be associated with a Stage for it to be callable over the internet." + description: "DeploymentSpec defines the desired state of Deployment.\n\n\nAn immutable representation of an API that can be called by users. A Deployment\nmust be associated with a Stage for it to be callable over the internet." properties: apiID: type: "string" apiRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -50,13 +50,13 @@ spec: description: "DeploymentStatus defines the observed state of Deployment" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -68,9 +68,9 @@ spec: autoDeployed: type: "boolean" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/integrations.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/integrations.yaml index 8a6b46acf..9eb110419 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/integrations.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/integrations.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "integrations.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,23 +19,23 @@ spec: description: "Integration is the Schema for the Integrations API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "IntegrationSpec defines the desired state of Integration. \n Represents an integration." + description: "IntegrationSpec defines the desired state of Integration.\n\n\nRepresents an integration." properties: apiID: type: "string" apiRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -44,10 +44,10 @@ spec: connectionID: type: "string" connectionRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -93,7 +93,7 @@ spec: format: "int64" type: "integer" tlsConfig: - description: "The TLS configuration for a private integration. If you specify a TLS configuration, private integration traffic uses the HTTPS protocol. Supported only for HTTP APIs." + description: "The TLS configuration for a private integration. If you specify a TLS configuration,\nprivate integration traffic uses the HTTPS protocol. Supported only for HTTP\nAPIs." properties: serverNameToVerify: description: "A string with a length between [1-512]." 
@@ -106,13 +106,13 @@ spec: description: "IntegrationStatus defines the observed state of Integration" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -124,9 +124,9 @@ spec: apiGatewayManaged: type: "boolean" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
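The reflowed descriptions above repeatedly document the ACK resource-reference wrapper, which lets a custom resource point at another Kubernetes resource through a 'from'/'name' block instead of a raw identifier such as apiID. A minimal sketch of an apigatewayv2 Integration using that syntax follows; only the reference is shown, other required Integration fields are omitted, the metadata name is hypothetical, and the 'my-api' name mirrors the example embedded in the field description:

apiVersion: apigatewayv2.services.k8s.aws/v1alpha1
kind: Integration
metadata:
  name: example-integration   # hypothetical name
spec:
  apiRef:                     # AWSResourceReferenceWrapper: reference the API resource by name
    from:
      name: my-api            # used by the controller to find the identifier of the referenced API resource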
diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/routes.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/routes.yaml index ca53d4629..7aedc5090 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/routes.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/routes.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "routes.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,25 +19,25 @@ spec: description: "Route is the Schema for the Routes API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "RouteSpec defines the desired state of Route. \n Represents a route." + description: "RouteSpec defines the desired state of Route.\n\n\nRepresents a route." 
properties: apiID: type: "string" apiKeyRequired: type: "boolean" apiRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -52,10 +52,10 @@ spec: authorizerID: type: "string" authorizerRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -71,7 +71,7 @@ spec: type: "object" requestParameters: additionalProperties: - description: "Validation constraints imposed on parameters of a request (path, query string, headers)." + description: "Validation constraints imposed on parameters of a request (path, query string,\nheaders)." properties: required: type: "boolean" @@ -84,10 +84,10 @@ spec: target: type: "string" targetRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -100,13 +100,13 @@ spec: description: "RouteStatus defines the observed state of Route" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -118,9 +118,9 @@ spec: apiGatewayManaged: type: "boolean" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/stages.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/stages.yaml index d1c521d66..8df5c9f75 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/stages.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/stages.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "stages.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "Stage is the Schema for the Stages API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "StageSpec defines the desired state of Stage. \n Represents an API stage." + description: "StageSpec defines the desired state of Stage.\n\n\nRepresents an API stage." properties: accessLogSettings: description: "Settings for logging access in a stage." @@ -42,10 +42,10 @@ spec: apiID: type: "string" apiRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -74,10 +74,10 @@ spec: deploymentID: type: "string" deploymentRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -120,13 +120,13 @@ spec: description: "StageStatus defines the observed state of Stage" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - 
description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -138,9 +138,9 @@ spec: apiGatewayManaged: type: "boolean" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/vpclinks.yaml b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/vpclinks.yaml index a816de705..9d58ffa5c 100644 --- a/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/vpclinks.yaml +++ b/crd-catalog/aws-controllers-k8s/apigatewayv2-controller/apigatewayv2.services.k8s.aws/v1alpha1/vpclinks.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "vpclinks.apigatewayv2.services.k8s.aws" spec: group: "apigatewayv2.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "VPCLink is the Schema for the VPCLinks API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "VpcLinkSpec defines the desired state of VpcLink. \n Represents a VPC link." + description: "VpcLinkSpec defines the desired state of VpcLink.\n\n\nRepresents a VPC link." properties: name: type: "string" @@ -51,13 +51,13 @@ spec: description: "VPCLinkStatus defines the observed state of VPCLink" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
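As a rough illustration (not part of the patch) of how the `ackResourceMetadata` block described here and the `conditions` block in the next hunk surface on a reconciled VPCLink, the values below are placeholders, and the `ACK.ResourceSynced` condition type is the usual ACK sync condition rather than anything defined in this file:

    status:
      ackResourceMetadata:
        arn: "arn:aws:apigateway:eu-west-1::/vpclinks/abc123"   # placeholder ARN
        ownerAccountID: "111122223333"                          # placeholder account ID
        region: "eu-west-1"
      conditions:
        - type: "ACK.ResourceSynced"                            # assumed standard ACK condition type
          status: "True"
          lastTransitionTime: "2024-02-16T10:07:44Z"            # "Last time the condition transitioned"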
@@ -67,9 +67,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalabletargets.yaml b/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalabletargets.yaml index 5e6090276..ff5c67219 100644 --- a/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalabletargets.yaml +++ b/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalabletargets.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "scalabletargets.applicationautoscaling.services.k8s.aws" spec: group: "applicationautoscaling.services.k8s.aws" @@ -19,38 +19,38 @@ spec: description: "ScalableTarget is the Schema for the ScalableTargets API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ScalableTargetSpec defines the desired state of ScalableTarget. \n Represents a scalable target." 
+ description: "ScalableTargetSpec defines the desired state of ScalableTarget.\n\n\nRepresents a scalable target." properties: maxCapacity: - description: "The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand. This property is required when registering a new scalable target. \n Although you can specify a large maximum capacity, note that service quotas may impose lower limits. Each service has its own default quotas for the maximum capacity of the resource. If you want to specify a higher limit, you can request an increase. For more information, consult the documentation for that service. For information about the default quotas for each service, see Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html) in the Amazon Web Services General Reference." + description: "The maximum value that you plan to scale out to. When a scaling policy is\nin effect, Application Auto Scaling can scale out (expand) as needed to the\nmaximum capacity limit in response to changing demand. This property is required\nwhen registering a new scalable target.\n\n\nAlthough you can specify a large maximum capacity, note that service quotas\nmay impose lower limits. Each service has its own default quotas for the\nmaximum capacity of the resource. If you want to specify a higher limit,\nyou can request an increase. For more information, consult the documentation\nfor that service. For information about the default quotas for each service,\nsee Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html)\nin the Amazon Web Services General Reference." format: "int64" type: "integer" minCapacity: - description: "The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target. \n For certain resources, the minimum value allowed is 0. This includes Lambda provisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR clusters, and custom resources. For all other resources, the minimum value allowed is 1." + description: "The minimum value that you plan to scale in to. When a scaling policy is\nin effect, Application Auto Scaling can scale in (contract) as needed to\nthe minimum capacity limit in response to changing demand. This property\nis required when registering a new scalable target.\n\n\nFor certain resources, the minimum value allowed is 0. This includes Lambda\nprovisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR\nclusters, and custom resources. For all other resources, the minimum value\nallowed is 1." format: "int64" type: "integer" resourceID: - description: "The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. \n * ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp. \n * Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
\n * EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. \n * AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet. \n * DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table. \n * DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index. \n * Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster. \n * SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. \n * Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). \n * Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. \n * Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. \n * Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1. \n * Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable. \n * Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. \n * Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. \n * Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster." + description: "The identifier of the resource that is associated with the scalable target.\nThis string consists of the resource type and unique identifier.\n\n\n * ECS service - The resource type is service and the unique identifier\n is the cluster name and service name. Example: service/default/sample-webapp.\n\n\n * Spot Fleet - The resource type is spot-fleet-request and the unique\n identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.\n\n\n * EMR cluster - The resource type is instancegroup and the unique identifier\n is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.\n\n\n * AppStream 2.0 fleet - The resource type is fleet and the unique identifier\n is the fleet name. Example: fleet/sample-fleet.\n\n\n * DynamoDB table - The resource type is table and the unique identifier\n is the table name. 
Example: table/my-table.\n\n\n * DynamoDB global secondary index - The resource type is index and the\n unique identifier is the index name. Example: table/my-table/index/my-table-index.\n\n\n * Aurora DB cluster - The resource type is cluster and the unique identifier\n is the cluster name. Example: cluster:my-db-cluster.\n\n\n * SageMaker endpoint variant - The resource type is variant and the unique\n identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.\n\n\n * Custom resources are not supported with a resource type. This parameter\n must specify the OutputValue from the CloudFormation template stack used\n to access the resources. The unique identifier is defined by the service\n provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).\n\n\n * Amazon Comprehend document classification endpoint - The resource type\n and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.\n\n\n * Amazon Comprehend entity recognizer endpoint - The resource type and\n unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.\n\n\n * Lambda provisioned concurrency - The resource type is function and the\n unique identifier is the function name with a function version or alias\n name suffix that is not $LATEST. Example: function:my-function:prod or\n function:my-function:1.\n\n\n * Amazon Keyspaces table - The resource type is table and the unique identifier\n is the table name. Example: keyspace/mykeyspace/table/mytable.\n\n\n * Amazon MSK cluster - The resource type and unique identifier are specified\n using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.\n\n\n * Amazon ElastiCache replication group - The resource type is replication-group\n and the unique identifier is the replication group name. Example: replication-group/mycluster.\n\n\n * Neptune cluster - The resource type is cluster and the unique identifier\n is the cluster name. Example: cluster:mycluster." type: "string" roleARN: - description: "This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. \n If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles)." + description: "This parameter is required for services that do not support service-linked\nroles (such as Amazon EMR), and it must specify the ARN of an IAM role that\nallows Application Auto Scaling to modify the scalable target on your behalf.\n\n\nIf the service supports service-linked roles, Application Auto Scaling uses\na service-linked role, which it creates if it does not yet exist. For more\ninformation, see Application Auto Scaling IAM roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles)." 
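To make the long resourceID enumeration above (and the scalableDimension list further down in this hunk) easier to apply, here is a minimal ScalableTarget sketch for the DynamoDB read-capacity case quoted in those lists. It is illustrative only and not part of the patch; the metadata name and capacity values are made up.

    apiVersion: "applicationautoscaling.services.k8s.aws/v1alpha1"
    kind: "ScalableTarget"
    metadata:
      name: "my-table-read-target"                      # hypothetical name
    spec:
      serviceNamespace: "dynamodb"
      resourceID: "table/my-table"                      # format taken from the list above
      scalableDimension: "dynamodb:table:ReadCapacityUnits"
      minCapacity: 5                                    # arbitrary example values
      maxCapacity: 100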
type: "string" scalableDimension: - description: "The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. \n * ecs:service:DesiredCount - The desired task count of an ECS service. \n * elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. \n * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. \n * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. \n * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. \n * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. \n * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. \n * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. \n * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. \n * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model endpoint variant. \n * custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. \n * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. \n * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint. \n * lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function. \n * cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table. \n * cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table. \n * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster. \n * elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group. \n * elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group. \n * neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster." + description: "The scalable dimension associated with the scalable target. This string consists\nof the service namespace, resource type, and scaling property.\n\n\n * ecs:service:DesiredCount - The desired task count of an ECS service.\n\n\n * elasticmapreduce:instancegroup:InstanceCount - The instance count of\n an EMR Instance Group.\n\n\n * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot\n Fleet.\n\n\n * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream\n 2.0 fleet.\n\n\n * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for\n a DynamoDB table.\n\n\n * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for\n a DynamoDB table.\n\n\n * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for\n a DynamoDB global secondary index.\n\n\n * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for\n a DynamoDB global secondary index.\n\n\n * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora\n DB cluster. 
Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible\n edition.\n\n\n * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances\n for an SageMaker model endpoint variant.\n\n\n * custom-resource:ResourceType:Property - The scalable dimension for a\n custom resource provided by your own application or service.\n\n\n * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The\n number of inference units for an Amazon Comprehend document classification\n endpoint.\n\n\n * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number\n of inference units for an Amazon Comprehend entity recognizer endpoint.\n\n\n * lambda:function:ProvisionedConcurrency - The provisioned concurrency\n for a Lambda function.\n\n\n * cassandra:table:ReadCapacityUnits - The provisioned read capacity for\n an Amazon Keyspaces table.\n\n\n * cassandra:table:WriteCapacityUnits - The provisioned write capacity\n for an Amazon Keyspaces table.\n\n\n * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB)\n for brokers in an Amazon MSK cluster.\n\n\n * elasticache:replication-group:NodeGroups - The number of node groups\n for an Amazon ElastiCache replication group.\n\n\n * elasticache:replication-group:Replicas - The number of replicas per\n node group for an Amazon ElastiCache replication group.\n\n\n * neptune:cluster:ReadReplicaCount - The count of read replicas in an\n Amazon Neptune DB cluster." type: "string" serviceNamespace: - description: "The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead." + description: "The namespace of the Amazon Web Services service that provides the resource.\nFor a resource provided by your own application or service, use custom-resource\ninstead." type: "string" suspendedState: - description: "An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities. \n Suspension Outcomes \n * For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended. \n * For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. \n * For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended. \n For more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) in the Application Auto Scaling User Guide." + description: "An embedded object that contains attributes and attribute values that are\nused to suspend and resume automatic scaling. Setting the value of an attribute\nto true suspends the specified scaling activities. 
Setting it to false (default)\nresumes the specified scaling activities.\n\n\nSuspension Outcomes\n\n\n * For DynamicScalingInSuspended, while a suspension is in effect, all\n scale-in activities that are triggered by a scaling policy are suspended.\n\n\n * For DynamicScalingOutSuspended, while a suspension is in effect, all\n scale-out activities that are triggered by a scaling policy are suspended.\n\n\n * For ScheduledScalingSuspended, while a suspension is in effect, all\n scaling activities that involve scheduled actions are suspended.\n\n\nFor more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html)\nin the Application Auto Scaling User Guide." properties: dynamicScalingInSuspended: type: "boolean" @@ -68,13 +68,13 @@ spec: description: "ScalableTargetStatus defines the observed state of ScalableTarget" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -84,9 +84,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalingpolicies.yaml b/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalingpolicies.yaml index 216a8b118..9137d4364 100644 --- a/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalingpolicies.yaml +++ b/crd-catalog/aws-controllers-k8s/applicationautoscaling-controller/applicationautoscaling.services.k8s.aws/v1alpha1/scalingpolicies.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "scalingpolicies.applicationautoscaling.services.k8s.aws" spec: group: "applicationautoscaling.services.k8s.aws" @@ -19,33 +19,33 @@ spec: description: "ScalingPolicy is the Schema for the ScalingPolicies API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ScalingPolicySpec defines the desired state of ScalingPolicy. \n Represents a scaling policy to use with Application Auto Scaling. 
\n For more information about configuring scaling policies for a specific service, see Getting started with Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/getting-started.html) in the Application Auto Scaling User Guide." + description: "ScalingPolicySpec defines the desired state of ScalingPolicy.\n\n\nRepresents a scaling policy to use with Application Auto Scaling.\n\n\nFor more information about configuring scaling policies for a specific service,\nsee Getting started with Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/getting-started.html)\nin the Application Auto Scaling User Guide." properties: policyName: description: "The name of the scaling policy." type: "string" policyType: - description: "The policy type. This parameter is required if you are creating a scaling policy. \n The following policy types are supported: \n TargetTrackingScaling—Not supported for Amazon EMR \n StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. \n For more information, see Target tracking scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and Step scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) in the Application Auto Scaling User Guide." + description: "The policy type. This parameter is required if you are creating a scaling\npolicy.\n\n\nThe following policy types are supported:\n\n\nTargetTrackingScaling—Not supported for Amazon EMR\n\n\nStepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon\nKeyspaces, Amazon MSK, Amazon ElastiCache, or Neptune.\n\n\nFor more information, see Target tracking scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html)\nand Step scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html)\nin the Application Auto Scaling User Guide." type: "string" resourceID: - description: "The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. \n * ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp. \n * Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. \n * EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. \n * AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet. \n * DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table. \n * DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index. \n * Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster. \n * SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering. \n * Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). \n * Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. \n * Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. \n * Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1. \n * Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable. \n * Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. \n * Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. \n * Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster." + description: "The identifier of the resource associated with the scaling policy. This string\nconsists of the resource type and unique identifier.\n\n\n * ECS service - The resource type is service and the unique identifier\n is the cluster name and service name. Example: service/default/sample-webapp.\n\n\n * Spot Fleet - The resource type is spot-fleet-request and the unique\n identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.\n\n\n * EMR cluster - The resource type is instancegroup and the unique identifier\n is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.\n\n\n * AppStream 2.0 fleet - The resource type is fleet and the unique identifier\n is the fleet name. Example: fleet/sample-fleet.\n\n\n * DynamoDB table - The resource type is table and the unique identifier\n is the table name. Example: table/my-table.\n\n\n * DynamoDB global secondary index - The resource type is index and the\n unique identifier is the index name. Example: table/my-table/index/my-table-index.\n\n\n * Aurora DB cluster - The resource type is cluster and the unique identifier\n is the cluster name. Example: cluster:my-db-cluster.\n\n\n * SageMaker endpoint variant - The resource type is variant and the unique\n identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.\n\n\n * Custom resources are not supported with a resource type. This parameter\n must specify the OutputValue from the CloudFormation template stack used\n to access the resources. The unique identifier is defined by the service\n provider. 
More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).\n\n\n * Amazon Comprehend document classification endpoint - The resource type\n and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.\n\n\n * Amazon Comprehend entity recognizer endpoint - The resource type and\n unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE.\n\n\n * Lambda provisioned concurrency - The resource type is function and the\n unique identifier is the function name with a function version or alias\n name suffix that is not $LATEST. Example: function:my-function:prod or\n function:my-function:1.\n\n\n * Amazon Keyspaces table - The resource type is table and the unique identifier\n is the table name. Example: keyspace/mykeyspace/table/mytable.\n\n\n * Amazon MSK cluster - The resource type and unique identifier are specified\n using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5.\n\n\n * Amazon ElastiCache replication group - The resource type is replication-group\n and the unique identifier is the replication group name. Example: replication-group/mycluster.\n\n\n * Neptune cluster - The resource type is cluster and the unique identifier\n is the cluster name. Example: cluster:mycluster." type: "string" scalableDimension: - description: "The scalable dimension. This string consists of the service namespace, resource type, and scaling property. \n * ecs:service:DesiredCount - The desired task count of an ECS service. \n * elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. \n * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. \n * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. \n * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. \n * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. \n * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. \n * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. \n * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. \n * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model endpoint variant. \n * custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. \n * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. \n * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint. \n * lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function. \n * cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table. \n * cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table. 
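A ScalingPolicy pairs one of the resourceID formats listed above with the matching scalableDimension that follows. A minimal, illustrative skeleton (not part of the patch) for the ECS case quoted in the list might look like this; the policy name is made up, and the scaling configuration itself is sketched after the later hunks:

    apiVersion: "applicationautoscaling.services.k8s.aws/v1alpha1"
    kind: "ScalingPolicy"
    metadata:
      name: "sample-webapp-cpu-policy"                  # hypothetical name
    spec:
      policyName: "sample-webapp-cpu-policy"
      policyType: "TargetTrackingScaling"               # one of the two policy types named above
      serviceNamespace: "ecs"
      resourceID: "service/default/sample-webapp"       # example format from the list above
      scalableDimension: "ecs:service:DesiredCount"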
\n * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster. \n * elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group. \n * elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group. \n * neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster." + description: "The scalable dimension. This string consists of the service namespace, resource\ntype, and scaling property.\n\n\n * ecs:service:DesiredCount - The desired task count of an ECS service.\n\n\n * elasticmapreduce:instancegroup:InstanceCount - The instance count of\n an EMR Instance Group.\n\n\n * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot\n Fleet.\n\n\n * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream\n 2.0 fleet.\n\n\n * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for\n a DynamoDB table.\n\n\n * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for\n a DynamoDB table.\n\n\n * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for\n a DynamoDB global secondary index.\n\n\n * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for\n a DynamoDB global secondary index.\n\n\n * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora\n DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible\n edition.\n\n\n * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances\n for an SageMaker model endpoint variant.\n\n\n * custom-resource:ResourceType:Property - The scalable dimension for a\n custom resource provided by your own application or service.\n\n\n * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The\n number of inference units for an Amazon Comprehend document classification\n endpoint.\n\n\n * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number\n of inference units for an Amazon Comprehend entity recognizer endpoint.\n\n\n * lambda:function:ProvisionedConcurrency - The provisioned concurrency\n for a Lambda function.\n\n\n * cassandra:table:ReadCapacityUnits - The provisioned read capacity for\n an Amazon Keyspaces table.\n\n\n * cassandra:table:WriteCapacityUnits - The provisioned write capacity\n for an Amazon Keyspaces table.\n\n\n * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB)\n for brokers in an Amazon MSK cluster.\n\n\n * elasticache:replication-group:NodeGroups - The number of node groups\n for an Amazon ElastiCache replication group.\n\n\n * elasticache:replication-group:Replicas - The number of replicas per\n node group for an Amazon ElastiCache replication group.\n\n\n * neptune:cluster:ReadReplicaCount - The count of read replicas in an\n Amazon Neptune DB cluster." type: "string" serviceNamespace: - description: "The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead." + description: "The namespace of the Amazon Web Services service that provides the resource.\nFor a resource provided by your own application or service, use custom-resource\ninstead." type: "string" stepScalingPolicyConfiguration: - description: "A step scaling policy. \n This parameter is required if you are creating a policy and the policy type is StepScaling." 
+ description: "A step scaling policy.\n\n\nThis parameter is required if you are creating a policy and the policy type\nis StepScaling." properties: adjustmentType: type: "string" @@ -59,7 +59,7 @@ spec: type: "integer" stepAdjustments: items: - description: "Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html). Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm. \n For the following examples, suppose that you have an alarm with a breach threshold of 50: \n * To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10. \n * To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0. \n There are a few rules for the step adjustments for your step policy: \n * The ranges of your step adjustments can't overlap or have a gap. \n * At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound. \n * At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound. \n * The upper and lower bound can't be null in the same step adjustment." + description: "Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html).\nDescribes an adjustment based on the difference between the value of the\naggregated CloudWatch metric and the breach threshold that you've defined\nfor the alarm.\n\n\nFor the following examples, suppose that you have an alarm with a breach\nthreshold of 50:\n\n\n * To trigger the adjustment when the metric is greater than or equal to\n 50 and less than 60, specify a lower bound of 0 and an upper bound of\n 10.\n\n\n * To trigger the adjustment when the metric is greater than 40 and less\n than or equal to 50, specify a lower bound of -10 and an upper bound of\n 0.\n\n\nThere are a few rules for the step adjustments for your step policy:\n\n\n * The ranges of your step adjustments can't overlap or have a gap.\n\n\n * At most one step adjustment can have a null lower bound. If one step\n adjustment has a negative lower bound, then there must be a step adjustment\n with a null lower bound.\n\n\n * At most one step adjustment can have a null upper bound. If one step\n adjustment has a positive upper bound, then there must be a step adjustment\n with a null upper bound.\n\n\n * The upper and lower bound can't be null in the same step adjustment." properties: metricIntervalLowerBound: type: "number" @@ -72,10 +72,10 @@ spec: type: "array" type: "object" targetTrackingScalingPolicyConfiguration: - description: "A target tracking scaling policy. Includes support for predefined or customized metrics. \n This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling." + description: "A target tracking scaling policy. Includes support for predefined or customized\nmetrics.\n\n\nThis parameter is required if you are creating a policy and the policy type\nis TargetTrackingScaling." 
properties: customizedMetricSpecification: - description: "Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling. \n For information about the available metrics for a service, see Amazon Web Services Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) in the Amazon CloudWatch User Guide. \n To create your customized metric specification: \n * Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) in the Amazon CloudWatch User Guide. \n * Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases, and increase when capacity decreases. \n For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html)." + description: "Represents a CloudWatch metric of your choosing for a target tracking scaling\npolicy to use with Application Auto Scaling.\n\n\nFor information about the available metrics for a service, see Amazon Web\nServices Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html)\nin the Amazon CloudWatch User Guide.\n\n\nTo create your customized metric specification:\n\n\n * Add values for each required parameter from CloudWatch. You can use\n an existing metric, or a new metric that you create. To use your own metric,\n you must first publish the metric to CloudWatch. For more information,\n see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html)\n in the Amazon CloudWatch User Guide.\n\n\n * Choose a metric that changes proportionally with capacity. The value\n of the metric should increase or decrease in inverse proportion to the\n number of capacity units. That is, the value of the metric should decrease\n when capacity increases, and increase when capacity decreases.\n\n\nFor more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html)." properties: dimensions: items: @@ -99,7 +99,7 @@ spec: disableScaleIn: type: "boolean" predefinedMetricSpecification: - description: "Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling. \n Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) in the Application Auto Scaling User Guide." 
+ description: "Represents a predefined metric for a target tracking scaling policy to use\nwith Application Auto Scaling.\n\n\nOnly the Amazon Web Services that you're using send metrics to Amazon CloudWatch.\nTo determine whether a desired metric already exists by looking up its namespace\nand dimension using the CloudWatch metrics dashboard in the console, follow\nthe procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html)\nin the Application Auto Scaling User Guide." properties: predefinedMetricType: type: "string" @@ -125,13 +125,13 @@ spec: description: "ScalingPolicyStatus defines the observed state of ScalingPolicy" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -152,9 +152,9 @@ spec: type: "object" type: "array" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/backups.yaml b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/backups.yaml index 5216ff0a2..1712e0bee 100644 --- a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/backups.yaml +++ b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/backups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "backups.dynamodb.services.k8s.aws" spec: group: "dynamodb.services.k8s.aws" @@ -42,10 +42,10 @@ spec: description: "Backup is the Schema for the Backups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -66,13 +66,13 @@ spec: description: "BackupStatus defines the observed state of Backup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -86,23 +86,23 @@ spec: format: "date-time" type: "string" backupExpiryDateTime: - description: "Time at which the automatic on-demand backup created by DynamoDB will expire. This SYSTEM on-demand backup expires automatically 35 days after its creation." + description: "Time at which the automatic on-demand backup created by DynamoDB will expire.\nThis SYSTEM on-demand backup expires automatically 35 days after its creation." format: "date-time" type: "string" backupSizeBytes: - description: "Size of the backup in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value." + description: "Size of the backup in bytes. DynamoDB updates this value approximately every\nsix hours. Recent changes might not be reflected in this value." format: "int64" type: "integer" backupStatus: description: "Backup can be in one of the following states: CREATING, ACTIVE, DELETED." type: "string" backupType: - description: "BackupType: \n * USER - You create and manage these using the on-demand backup feature. 
\n * SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. \n * AWS_BACKUP - On-demand backup created by you from Backup service." + description: "BackupType:\n\n\n * USER - You create and manage these using the on-demand backup feature.\n\n\n * SYSTEM - If you delete a table with point-in-time recovery enabled,\n a SYSTEM backup is automatically created and is retained for 35 days (at\n no additional cost). System backups allow you to restore the deleted table\n to the state it was in just before the point of deletion.\n\n\n * AWS_BACKUP - On-demand backup created by you from Backup service." type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/globaltables.yaml b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/globaltables.yaml index 895349262..69e96594e 100644 --- a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/globaltables.yaml +++ b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/globaltables.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "globaltables.dynamodb.services.k8s.aws" spec: group: "dynamodb.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "GlobalTable is the Schema for the GlobalTables API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "GlobalTableSpec defines the desired state of GlobalTable. \n Represents the properties of a global table." + description: "GlobalTableSpec defines the desired state of GlobalTable.\n\n\nRepresents the properties of a global table." properties: globalTableName: description: "The global table name." @@ -49,13 +49,13 @@ spec: description: "GlobalTableStatus defines the observed state of GlobalTable" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
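# Illustrative sketch (not part of the upstream CRD or this patch): a minimal GlobalTable
# manifest. The group/version and kind come from the file path and schema in this diff;
# globalTableName is the only spec field shown in these hunks. The replicationGroup list is
# an assumption mirroring the DynamoDB CreateGlobalTable API, not confirmed by this diff.
apiVersion: "dynamodb.services.k8s.aws/v1alpha1"
kind: "GlobalTable"
metadata:
  name: "example-global-table"
spec:
  globalTableName: "example-global-table"
  replicationGroup:                 # assumed field; one entry per replica region
    - regionName: "us-east-1"
    - regionName: "eu-west-1"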
@@ -65,9 +65,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -95,7 +95,7 @@ spec: format: "date-time" type: "string" globalTableStatus: - description: "The current state of the global table: \n * CREATING - The global table is being created. \n * UPDATING - The global table is being updated. \n * DELETING - The global table is being deleted. \n * ACTIVE - The global table is ready for use." + description: "The current state of the global table:\n\n\n * CREATING - The global table is being created.\n\n\n * UPDATING - The global table is being updated.\n\n\n * DELETING - The global table is being deleted.\n\n\n * ACTIVE - The global table is ready for use." type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/tables.yaml b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/tables.yaml index cab044c7a..1f05013e3 100644 --- a/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/tables.yaml +++ b/crd-catalog/aws-controllers-k8s/dynamodb-controller/dynamodb.services.k8s.aws/v1alpha1/tables.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "tables.dynamodb.services.k8s.aws" spec: group: "dynamodb.services.k8s.aws" @@ -36,10 +36,10 @@ spec: description: "Table is the Schema for the Tables API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -58,7 +58,7 @@ spec: type: "object" type: "array" billingMode: - description: "Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later. \n * PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). \n * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand)." + description: "Controls how you are charged for read and write throughput and how you manage\ncapacity. This setting can be changed later.\n\n\n * PROVISIONED - We recommend using PROVISIONED for predictable workloads.\n PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual).\n\n\n * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable\n workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand)." type: "string" continuousBackups: description: "Represents the settings used to enable point in time recovery." @@ -67,10 +67,10 @@ spec: type: "boolean" type: "object" deletionProtectionEnabled: - description: "Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table." + description: "Indicates whether deletion protection is to be enabled (true) or disabled\n(false) on the table." type: "boolean" globalSecondaryIndexes: - description: "One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index in the array includes the following: \n * IndexName - The name of the global secondary index. Must be unique only for this table. \n * KeySchema - Specifies the key schema for the global secondary index. \n * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of: ProjectionType - One of the following: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes. ALL - All of the table attributes are projected into the index. NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. 
If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. \n * ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units." + description: "One or more global secondary indexes (the maximum is 20) to be created on\nthe table. Each global secondary index in the array includes the following:\n\n\n * IndexName - The name of the global secondary index. Must be unique only\n for this table.\n\n\n * KeySchema - Specifies the key schema for the global secondary index.\n\n\n * Projection - Specifies attributes that are copied (projected) from the\n table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of: ProjectionType - One of the following: KEYS_ONLY\n - Only the index and primary keys are projected into the index. INCLUDE\n - Only the specified table attributes are projected into the index. The\n list of projected attributes is in NonKeyAttributes. ALL - All of the\n table attributes are projected into the index. NonKeyAttributes - A list\n of one or more non-key attribute names that are projected into the secondary\n index. The total count of attributes provided in NonKeyAttributes, summed\n across all of the secondary indexes, must not exceed 100. If you project\n the same attribute into two different indexes, this counts as two distinct\n attributes when determining the total.\n\n\n * ProvisionedThroughput - The provisioned throughput settings for the\n global secondary index, consisting of read and write capacity units." items: description: "Represents the properties of a global secondary index." properties: @@ -78,7 +78,7 @@ spec: type: "string" keySchema: items: - description: "Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. \n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. \n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map." + description: "Represents a single element of a key schema. A key schema specifies the attributes\nthat make up the primary key of a table, or the key attributes of an index.\n\n\nA KeySchemaElement represents exactly one attribute of the primary key. For\nexample, a simple primary key would be represented by one KeySchemaElement\n(for the partition key). A composite primary key would require one KeySchemaElement\nfor the partition key, and another KeySchemaElement for the sort key.\n\n\nA KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).\nThe data type must be one of String, Number, or Binary. The attribute cannot\nbe nested within a List or a Map." properties: attributeName: type: "string" @@ -87,7 +87,7 @@ spec: type: "object" type: "array" projection: - description: "Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected." 
+ description: "Represents attributes that are copied (projected) from the table into an\nindex. These are in addition to the primary key attributes and index key\nattributes, which are automatically projected." properties: nonKeyAttributes: items: @@ -97,7 +97,7 @@ spec: type: "string" type: "object" provisionedThroughput: - description: "Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. \n For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide." + description: "Represents the provisioned throughput settings for a specified table or index.\nThe settings can be modified using the UpdateTable operation.\n\n\nFor current minimum and maximum provisioned throughput values, see Service,\nAccount, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)\nin the Amazon DynamoDB Developer Guide." properties: readCapacityUnits: format: "int64" @@ -109,9 +109,9 @@ spec: type: "object" type: "array" keySchema: - description: "Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) in the Amazon DynamoDB Developer Guide. \n Each KeySchemaElement in the array is composed of: \n * AttributeName - The name of this key attribute. \n * KeyType - The role that the key attribute will assume: HASH - partition key RANGE - sort key \n The partition key of an item is also known as its hash attribute. The term \"hash attribute\" derives from the DynamoDB usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. \n The sort key of an item is also known as its range attribute. The term \"range attribute\" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. \n For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH. \n For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE. \n For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) in the Amazon DynamoDB Developer Guide." + description: "Specifies the attributes that make up the primary key for a table or an index.\nThe attributes in KeySchema must also be defined in the AttributeDefinitions\narray. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html)\nin the Amazon DynamoDB Developer Guide.\n\n\nEach KeySchemaElement in the array is composed of:\n\n\n * AttributeName - The name of this key attribute.\n\n\n * KeyType - The role that the key attribute will assume: HASH - partition\n key RANGE - sort key\n\n\nThe partition key of an item is also known as its hash attribute. 
The term\n\"hash attribute\" derives from the DynamoDB usage of an internal hash function\nto evenly distribute data items across partitions, based on their partition\nkey values.\n\n\nThe sort key of an item is also known as its range attribute. The term \"range\nattribute\" derives from the way DynamoDB stores items with the same partition\nkey physically close together, in sorted order by the sort key value.\n\n\nFor a simple primary key (partition key), you must provide exactly one element\nwith a KeyType of HASH.\n\n\nFor a composite primary key (partition key and sort key), you must provide\nexactly two elements, in this order: The first element must have a KeyType\nof HASH, and the second element must have a KeyType of RANGE.\n\n\nFor more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key)\nin the Amazon DynamoDB Developer Guide." items: - description: "Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. \n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. \n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map." + description: "Represents a single element of a key schema. A key schema specifies the attributes\nthat make up the primary key of a table, or the key attributes of an index.\n\n\nA KeySchemaElement represents exactly one attribute of the primary key. For\nexample, a simple primary key would be represented by one KeySchemaElement\n(for the partition key). A composite primary key would require one KeySchemaElement\nfor the partition key, and another KeySchemaElement for the sort key.\n\n\nA KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).\nThe data type must be one of String, Number, or Binary. The attribute cannot\nbe nested within a List or a Map." properties: attributeName: type: "string" @@ -120,7 +120,7 @@ spec: type: "object" type: "array" localSecondaryIndexes: - description: "One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained. \n Each local secondary index in the array includes the following: \n * IndexName - The name of the local secondary index. Must be unique only for this table. \n * KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table. \n * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of: ProjectionType - One of the following: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes. 
ALL - All of the table attributes are projected into the index. NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total." + description: "One or more local secondary indexes (the maximum is 5) to be created on the\ntable. Each index is scoped to a given partition key value. There is a 10\nGB size limit per partition key value; otherwise, the size of a local secondary\nindex is unconstrained.\n\n\nEach local secondary index in the array includes the following:\n\n\n * IndexName - The name of the local secondary index. Must be unique only\n for this table.\n\n\n * KeySchema - Specifies the key schema for the local secondary index.\n The key schema must begin with the same partition key as the table.\n\n\n * Projection - Specifies attributes that are copied (projected) from the\n table into the index. These are in addition to the primary key attributes\n and index key attributes, which are automatically projected. Each attribute\n specification is composed of: ProjectionType - One of the following: KEYS_ONLY\n - Only the index and primary keys are projected into the index. INCLUDE\n - Only the specified table attributes are projected into the index. The\n list of projected attributes is in NonKeyAttributes. ALL - All of the\n table attributes are projected into the index. NonKeyAttributes - A list\n of one or more non-key attribute names that are projected into the secondary\n index. The total count of attributes provided in NonKeyAttributes, summed\n across all of the secondary indexes, must not exceed 100. If you project\n the same attribute into two different indexes, this counts as two distinct\n attributes when determining the total." items: description: "Represents the properties of a local secondary index." properties: @@ -128,7 +128,7 @@ spec: type: "string" keySchema: items: - description: "Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. \n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. \n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map." + description: "Represents a single element of a key schema. A key schema specifies the attributes\nthat make up the primary key of a table, or the key attributes of an index.\n\n\nA KeySchemaElement represents exactly one attribute of the primary key. For\nexample, a simple primary key would be represented by one KeySchemaElement\n(for the partition key). A composite primary key would require one KeySchemaElement\nfor the partition key, and another KeySchemaElement for the sort key.\n\n\nA KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).\nThe data type must be one of String, Number, or Binary. The attribute cannot\nbe nested within a List or a Map." 
properties: attributeName: type: "string" @@ -137,7 +137,7 @@ spec: type: "object" type: "array" projection: - description: "Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected." + description: "Represents attributes that are copied (projected) from the table into an\nindex. These are in addition to the primary key attributes and index key\nattributes, which are automatically projected." properties: nonKeyAttributes: items: @@ -149,7 +149,7 @@ spec: type: "object" type: "array" provisionedThroughput: - description: "Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. \n If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. \n For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide." + description: "Represents the provisioned throughput settings for a specified table or index.\nThe settings can be modified using the UpdateTable operation.\n\n\nIf you set BillingMode as PROVISIONED, you must specify this property. If\nyou set BillingMode as PAY_PER_REQUEST, you cannot specify this property.\n\n\nFor current minimum and maximum provisioned throughput values, see Service,\nAccount, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)\nin the Amazon DynamoDB Developer Guide." properties: readCapacityUnits: format: "int64" @@ -169,7 +169,7 @@ spec: type: "string" type: "object" streamSpecification: - description: "The settings for DynamoDB Streams on the table. These settings consist of: \n * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or disabled (false). \n * StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified item are written to the stream. NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream." + description: "The settings for DynamoDB Streams on the table. These settings consist of:\n\n\n * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled\n (true) or disabled (false).\n\n\n * StreamViewType - When an item in the table is modified, StreamViewType\n determines what information is written to the table's stream. Valid values\n for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified\n item are written to the stream. NEW_IMAGE - The entire item, as it appears\n after it was modified, is written to the stream. OLD_IMAGE - The entire\n item, as it appeared before it was modified, is written to the stream.\n NEW_AND_OLD_IMAGES - Both the new and the old item images of the item\n are written to the stream." properties: streamEnabled: type: "boolean" @@ -183,9 +183,9 @@ spec: description: "The name of the table to create." type: "string" tags: - description: "A list of key-value pairs to label the table. 
For more information, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)." + description: "A list of key-value pairs to label the table. For more information, see Tagging\nfor DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)." items: - description: "Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table. \n Amazon Web Services-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag. \n For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) in the Amazon DynamoDB Developer Guide." + description: "Describes a tag. A tag is a key-value pair. You can add up to 50 tags to\na single DynamoDB table.\n\n\nAmazon Web Services-assigned tag names and values are automatically assigned\nthe aws: prefix, which the user cannot assign. Amazon Web Services-assigned\ntag names do not count towards the tag limit of 50. User-assigned tag names\nhave the prefix user: in the Cost Allocation Report. You cannot backdate\nthe application of a tag.\n\n\nFor an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)\nin the Amazon DynamoDB Developer Guide." properties: key: type: "string" @@ -194,7 +194,7 @@ spec: type: "object" type: "array" timeToLive: - description: "Represents the settings used to enable or disable Time to Live for the specified table." + description: "Represents the settings used to enable or disable Time to Live for the specified\ntable." properties: attributeName: type: "string" @@ -210,13 +210,13 @@ spec: description: "TableStatus defines the observed state of Table" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -237,9 +237,9 @@ spec: type: "string" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -263,7 +263,7 @@ spec: type: "object" type: "array" creationDateTime: - description: "The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) format." + description: "The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)\nformat." format: "date-time" type: "string" globalSecondaryIndexesDescriptions: @@ -286,7 +286,7 @@ spec: type: "integer" keySchema: items: - description: "Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. \n A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. \n A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map." + description: "Represents a single element of a key schema. A key schema specifies the attributes\nthat make up the primary key of a table, or the key attributes of an index.\n\n\nA KeySchemaElement represents exactly one attribute of the primary key. For\nexample, a simple primary key would be represented by one KeySchemaElement\n(for the partition key). 
A composite primary key would require one KeySchemaElement\nfor the partition key, and another KeySchemaElement for the sort key.\n\n\nA KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).\nThe data type must be one of String, Number, or Binary. The attribute cannot\nbe nested within a List or a Map." properties: attributeName: type: "string" @@ -295,7 +295,7 @@ spec: type: "object" type: "array" projection: - description: "Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected." + description: "Represents attributes that are copied (projected) from the table into an\nindex. These are in addition to the primary key attributes and index key\nattributes, which are automatically projected." properties: nonKeyAttributes: items: @@ -305,7 +305,7 @@ spec: type: "string" type: "object" provisionedThroughput: - description: "Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases." + description: "Represents the provisioned throughput settings for the table, consisting\nof read and write capacity units, along with data about increases and decreases." properties: lastDecreaseDateTime: format: "date-time" @@ -326,17 +326,17 @@ spec: type: "object" type: "array" globalTableVersion: - description: "Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) in use, if the table is replicated across Amazon Web Services Regions." + description: "Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)\nin use, if the table is replicated across Amazon Web Services Regions." type: "string" itemCount: - description: "The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value." + description: "The number of items in the specified table. DynamoDB updates this value approximately\nevery six hours. Recent changes might not be reflected in this value." format: "int64" type: "integer" latestStreamARN: - description: "The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table." + description: "The Amazon Resource Name (ARN) that uniquely identifies the latest stream\nfor this table." type: "string" latestStreamLabel: - description: "A timestamp, in ISO 8601 format, for this stream. \n Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: \n * Amazon Web Services customer ID \n * Table name \n * StreamLabel" + description: "A timestamp, in ISO 8601 format, for this stream.\n\n\nNote that LatestStreamLabel is not a unique identifier for the stream, because\nit is possible that a stream from another table might have the same timestamp.\nHowever, the combination of the following three elements is guaranteed to\nbe unique:\n\n\n * Amazon Web Services customer ID\n\n\n * Table name\n\n\n * StreamLabel" type: "string" replicas: description: "Represents replicas of the table." @@ -350,7 +350,7 @@ spec: indexName: type: "string" provisionedThroughputOverride: - description: "Replica-specific provisioned throughput settings. 
If not specified, uses the source table's provisioned throughput settings." + description: "Replica-specific provisioned throughput settings. If not specified, uses\nthe source table's provisioned throughput settings." properties: readCapacityUnits: format: "int64" @@ -361,7 +361,7 @@ spec: kmsMasterKeyID: type: "string" provisionedThroughputOverride: - description: "Replica-specific provisioned throughput settings. If not specified, uses the source table's provisioned throughput settings." + description: "Replica-specific provisioned throughput settings. If not specified, uses\nthe source table's provisioned throughput settings." properties: readCapacityUnits: format: "int64" @@ -406,11 +406,11 @@ spec: description: "Unique identifier for the table for which the backup was created." type: "string" tableSizeBytes: - description: "The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value." + description: "The total size of the specified table, in bytes. DynamoDB updates this value\napproximately every six hours. Recent changes might not be reflected in this\nvalue." format: "int64" type: "integer" tableStatus: - description: "The current state of the table: \n * CREATING - The table is being created. \n * UPDATING - The table/index configuration is being updated. The table/index remains available for data operations when UPDATING. \n * DELETING - The table is being deleted. \n * ACTIVE - The table is ready for use. \n * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table in inaccessible. Table operations may fail due to failure to use the KMS key. DynamoDB will initiate the table archival process when a table's KMS key remains inaccessible for more than seven days. \n * ARCHIVING - The table is being archived. Operations are not allowed until archival is complete. \n * ARCHIVED - The table has been archived. See the ArchivalReason for more information." + description: "The current state of the table:\n\n\n * CREATING - The table is being created.\n\n\n * UPDATING - The table/index configuration is being updated. The table/index\n remains available for data operations when UPDATING.\n\n\n * DELETING - The table is being deleted.\n\n\n * ACTIVE - The table is ready for use.\n\n\n * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the\n table in inaccessible. Table operations may fail due to failure to use\n the KMS key. DynamoDB will initiate the table archival process when a\n table's KMS key remains inaccessible for more than seven days.\n\n\n * ARCHIVING - The table is being archived. Operations are not allowed\n until archival is complete.\n\n\n * ARCHIVED - The table has been archived. See the ArchivalReason for more\n information." 
type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/dhcpoptions.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/dhcpoptions.yaml index ad8fcbc1a..1fa98abe9 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/dhcpoptions.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/dhcpoptions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dhcpoptions.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -23,15 +23,15 @@ spec: description: "DHCPOptions is the Schema for the DHCPOptions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DhcpOptionsSpec defines the desired state of DhcpOptions. \n Describes a set of DHCP options." + description: "DhcpOptionsSpec defines the desired state of DhcpOptions.\n\n\nDescribes a set of DHCP options." properties: dhcpConfigurations: description: "A DHCP configuration option." @@ -46,7 +46,7 @@ spec: type: "object" type: "array" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." 
properties: @@ -62,10 +62,10 @@ spec: type: "array" vpcRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -79,13 +79,13 @@ spec: description: "DHCPOptionsStatus defines the observed state of DHCPOptions" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -95,9 +95,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/elasticipaddresses.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/elasticipaddresses.yaml index b1df1d85f..33eced790 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/elasticipaddresses.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/elasticipaddresses.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "elasticipaddresses.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,10 +26,10 @@ spec: description: "ElasticIPAddress is the Schema for the ElasticIPAddresses API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -37,19 +37,19 @@ spec: description: "ElasticIPAddressSpec defines the desired state of ElasticIPAddress." properties: address: - description: "[EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool." + description: "[EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address\npool." 
type: "string" customerOwnedIPv4Pool: - description: "The ID of a customer-owned address pool. Use this parameter to let Amazon EC2 select an address from the address pool. Alternatively, specify a specific address from the address pool." + description: "The ID of a customer-owned address pool. Use this parameter to let Amazon\nEC2 select an address from the address pool. Alternatively, specify a specific\naddress from the address pool." type: "string" networkBorderGroup: - description: "A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. \n Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) to view the network border groups. \n You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 Classic, you receive an InvalidParameterCombination error." + description: "A unique set of Availability Zones, Local Zones, or Wavelength Zones from\nwhich Amazon Web Services advertises IP addresses. Use this parameter to\nlimit the IP address to this location. IP addresses cannot move between network\nborder groups.\n\n\nUse DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html)\nto view the network border groups.\n\n\nYou cannot use a network border group with EC2 Classic. If you attempt this\noperation on EC2 Classic, you receive an InvalidParameterCombination error." type: "string" publicIPv4Pool: - description: "The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead." + description: "The ID of an address pool that you own. Use this parameter to let Amazon\nEC2 select an address from the address pool. To specify a specific address\nfrom the address pool, use the Address parameter instead." type: "string" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -64,13 +64,13 @@ spec: description: "ElasticIPAddressStatus defines the observed state of ElasticIPAddress" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -80,15 +80,15 @@ spec: - "region" type: "object" allocationID: - description: "[EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with instances in a VPC." + description: "[EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation\nof the Elastic IP address for use with instances in a VPC." type: "string" carrierIP: - description: "The carrier IP address. This option is only available for network interfaces which reside in a subnet in a Wavelength Zone (for example an EC2 instance)." + description: "The carrier IP address. This option is only available for network interfaces\nwhich reside in a subnet in a Wavelength Zone (for example an EC2 instance)." type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
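For orientation, a minimal sketch of an ElasticIPAddress custom resource written against the ec2.services.k8s.aws/v1alpha1 schema whose descriptions are reflowed above; the metadata name and the tag item's `key`/`value` field names are illustrative assumptions (the tag item properties are not spelled out in the hunks shown), while `kind`, `apiVersion`, and the `tags` spec field come from the CRD itself.

```yaml
# Sketch only, assuming the ACK ec2-controller is installed in the cluster.
# Allocates an Elastic IP address and tags it; omitting address/publicIPv4Pool
# lets the controller request a new allocation.
apiVersion: ec2.services.k8s.aws/v1alpha1
kind: ElasticIPAddress
metadata:
  name: example-eip          # hypothetical resource name
spec:
  tags:
    - key: environment       # assumed item field names for "Describes a tag."
      value: staging
```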
diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/instances.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/instances.yaml index d6184f45d..a424326b2 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/instances.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/instances.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "instances.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -23,20 +23,20 @@ spec: description: "Instance is the Schema for the Instances API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "InstanceSpec defines the desired state of Instance. \n Describes an instance." + description: "InstanceSpec defines the desired state of Instance.\n\n\nDescribes an instance." properties: blockDeviceMappings: - description: "The block device mapping, which defines the EBS volumes and instance store volumes to attach to the instance at launch. For more information, see Block device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) in the Amazon EC2 User Guide." + description: "The block device mapping, which defines the EBS volumes and instance store\nvolumes to attach to the instance at launch. For more information, see Block\ndevice mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)\nin the Amazon EC2 User Guide." items: - description: "Describes a block device mapping, which defines the EBS volumes and instance store volumes to attach to an instance at launch." + description: "Describes a block device mapping, which defines the EBS volumes and instance\nstore volumes to attach to an instance at launch." properties: deviceName: type: "string" @@ -72,7 +72,7 @@ spec: type: "object" type: "array" capacityReservationSpecification: - description: "Information about the Capacity Reservation targeting option. 
If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone)." + description: "Information about the Capacity Reservation targeting option. If you do not\nspecify this parameter, the instance's Capacity Reservation preference defaults\nto open, which enables it to run in any open Capacity Reservation that has\nmatching attributes (instance type, platform, Availability Zone)." properties: capacityReservationPreference: type: "string" @@ -86,7 +86,7 @@ spec: type: "object" type: "object" cpuOptions: - description: "The CPU options for the instance. For more information, see Optimize CPU options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) in the Amazon EC2 User Guide." + description: "The CPU options for the instance. For more information, see Optimize CPU\noptions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)\nin the Amazon EC2 User Guide." properties: coreCount: format: "int64" @@ -96,22 +96,22 @@ spec: type: "integer" type: "object" creditSpecification: - description: "The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) in the Amazon EC2 User Guide. \n Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) \n For T3 instances with host tenancy, only standard is supported." + description: "The credit option for CPU usage of the burstable performance instance. Valid\nvalues are standard and unlimited. To change this attribute after launch,\nuse ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html).\nFor more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html)\nin the Amazon EC2 User Guide.\n\n\nDefault: standard (T2 instances) or unlimited (T3/T3a/T4g instances)\n\n\nFor T3 instances with host tenancy, only standard is supported." properties: cpuCredits: type: "string" type: "object" disableAPIStop: - description: "Indicates whether an instance is enabled for stop protection. For more information, see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection)." + description: "Indicates whether an instance is enabled for stop protection. For more information,\nsee Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection)." type: "boolean" disableAPITermination: - description: "If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance. 
\n Default: false" + description: "If you set this parameter to true, you can't terminate the instance using\nthe Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute\nafter launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html).\nAlternatively, if you set InstanceInitiatedShutdownBehavior to terminate,\nyou can terminate the instance by running the shutdown command from the instance.\n\n\nDefault: false" type: "boolean" ebsOptimized: - description: "Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance. \n Default: false" + description: "Indicates whether the instance is optimized for Amazon EBS I/O. This optimization\nprovides dedicated throughput to Amazon EBS and an optimized configuration\nstack to provide optimal Amazon EBS I/O performance. This optimization isn't\navailable with all instance types. Additional usage charges apply when using\nan EBS-optimized instance.\n\n\nDefault: false" type: "boolean" elasticGPUSpecification: - description: "An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource that you can attach to your Windows instance to accelerate the graphics performance of your applications. For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) in the Amazon EC2 User Guide." + description: "An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource\nthat you can attach to your Windows instance to accelerate the graphics performance\nof your applications. For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html)\nin the Amazon EC2 User Guide." items: description: "A specification for an Elastic Graphics accelerator." properties: @@ -120,7 +120,7 @@ spec: type: "object" type: "array" elasticInferenceAccelerators: - description: "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. \n You cannot specify accelerators from different generations in the same request." + description: "An elastic inference accelerator to associate with the instance. Elastic\ninference accelerators are a resource you can attach to your Amazon EC2 instances\nto accelerate your Deep Learning (DL) inference workloads.\n\n\nYou cannot specify accelerators from different generations in the same request." items: description: "Describes an elastic inference accelerator." properties: @@ -132,13 +132,13 @@ spec: type: "object" type: "array" enclaveOptions: - description: "Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the Amazon Web Services Nitro Enclaves User Guide. \n You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance." 
+ description: "Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.\nFor more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)\nin the Amazon Web Services Nitro Enclaves User Guide.\n\n\nYou can't enable Amazon Web Services Nitro Enclaves and hibernation on the\nsame instance." properties: enabled: type: "boolean" type: "object" hibernationOptions: - description: "Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the Amazon EC2 User Guide. \n You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance." + description: "Indicates whether an instance is enabled for hibernation. For more information,\nsee Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)\nin the Amazon EC2 User Guide.\n\n\nYou can't enable hibernation and Amazon Web Services Nitro Enclaves on the\nsame instance." properties: configured: type: "boolean" @@ -152,13 +152,13 @@ spec: type: "string" type: "object" imageID: - description: "The ID of the AMI. An AMI ID is required to launch an instance and must be specified here or in a launch template." + description: "The ID of the AMI. An AMI ID is required to launch an instance and must be\nspecified here or in a launch template." type: "string" instanceInitiatedShutdownBehavior: - description: "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). \n Default: stop" + description: "Indicates whether an instance stops or terminates when you initiate shutdown\nfrom the instance (using the operating system command for system shutdown).\n\n\nDefault: stop" type: "string" instanceMarketOptions: - description: "The market (purchasing) option for the instances. \n For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop." + description: "The market (purchasing) option for the instances.\n\n\nFor RunInstances, persistent Spot Instance requests are only supported when\nInstanceInterruptionBehavior is set to either hibernate or stop." properties: marketType: type: "string" @@ -180,14 +180,14 @@ spec: type: "object" type: "object" instanceType: - description: "The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the Amazon EC2 User Guide. \n Default: m1.small" + description: "The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)\nin the Amazon EC2 User Guide.\n\n\nDefault: m1.small" type: "string" ipv6AddressCount: - description: "[EC2-VPC] The number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch. \n You cannot specify this option and the network interfaces option in the same request." + description: "[EC2-VPC] The number of IPv6 addresses to associate with the primary network\ninterface. 
Amazon EC2 chooses the IPv6 addresses from the range of your subnet.\nYou cannot specify this option and the option to assign specific IPv6 addresses\nin the same request. You can specify this option if you've specified a minimum\nnumber of instances to launch.\n\n\nYou cannot specify this option and the network interfaces option in the same\nrequest." format: "int64" type: "integer" ipv6Addresses: - description: "[EC2-VPC] The IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch. \n You cannot specify this option and the network interfaces option in the same request." + description: "[EC2-VPC] The IPv6 addresses from the range of the subnet to associate with\nthe primary network interface. You cannot specify this option and the option\nto assign a number of IPv6 addresses in the same request. You cannot specify\nthis option if you've specified a minimum number of instances to launch.\n\n\nYou cannot specify this option and the network interfaces option in the same\nrequest." items: description: "Describes an IPv6 address." properties: @@ -196,13 +196,13 @@ spec: type: "object" type: "array" kernelID: - description: "The ID of the kernel. \n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the Amazon EC2 User Guide." + description: "The ID of the kernel.\n\n\nWe recommend that you use PV-GRUB instead of kernels and RAM disks. For more\ninformation, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)\nin the Amazon EC2 User Guide." type: "string" keyName: - description: "The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). \n If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in." + description: "The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html)\nor ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html).\n\n\nIf you do not specify a key pair, you can't connect to the instance unless\nyou choose an AMI that is configured to allow users another way to log in." type: "string" launchTemplate: - description: "The launch template to use to launch the instances. Any parameters that you specify in RunInstances override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both." + description: "The launch template to use to launch the instances. Any parameters that you\nspecify in RunInstances override the same parameters in the launch template.\nYou can specify either the name or ID of a launch template, but not both." properties: launchTemplateID: type: "string" @@ -227,11 +227,11 @@ spec: type: "string" type: "object" maxCount: - description: "The maximum number of instances to launch. 
If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount. \n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) in the Amazon EC2 FAQ." + description: "The maximum number of instances to launch. If you specify more instances\nthan Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches\nthe largest possible number of instances above MinCount.\n\n\nConstraints: Between 1 and the maximum number you're allowed for the specified\ninstance type. For more information about the default limits, and how to\nrequest an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)\nin the Amazon EC2 FAQ." format: "int64" type: "integer" metadataOptions: - description: "The metadata options for the instance. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)." + description: "The metadata options for the instance. For more information, see Instance\nmetadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)." properties: httpEndpoint: type: "string" @@ -246,7 +246,7 @@ spec: type: "string" type: "object" minCount: - description: "The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances. \n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) in the Amazon EC2 General FAQ." + description: "The minimum number of instances to launch. If you specify a minimum that\nis more instances than Amazon EC2 can launch in the target Availability Zone,\nAmazon EC2 launches no instances.\n\n\nConstraints: Between 1 and the maximum number you're allowed for the specified\ninstance type. For more information about the default limits, and how to\nrequest an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2)\nin the Amazon EC2 General FAQ." format: "int64" type: "integer" monitoring: @@ -256,7 +256,7 @@ spec: type: "boolean" type: "object" networkInterfaces: - description: "The network interfaces to associate with the instance. If you specify a network interface, you must specify any security groups and subnets as part of the network interface." + description: "The network interfaces to associate with the instance. If you specify a network\ninterface, you must specify any security groups and subnets as part of the\nnetwork interface." items: description: "Describes a network interface." properties: @@ -352,7 +352,7 @@ spec: type: "string" type: "object" privateDNSNameOptions: - description: "The options for the instance hostname. The default values are inherited from the subnet." + description: "The options for the instance hostname. The default values are inherited from\nthe subnet." 
properties: enableResourceNameDNSAAAARecord: type: "boolean" @@ -362,26 +362,26 @@ spec: type: "string" type: "object" privateIPAddress: - description: "[EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet. \n Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request. \n You cannot specify this option and the network interfaces option in the same request." + description: "[EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4\naddress range of the subnet.\n\n\nOnly one private IP address can be designated as primary. You can't specify\nthis option if you've specified the option to designate a private IP address\nas the primary IP address in a network interface specification. You cannot\nspecify this option if you're launching more than one instance in the request.\n\n\nYou cannot specify this option and the network interfaces option in the same\nrequest." type: "string" ramDiskID: - description: "The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the Amazon Web Services Resource Center and search for the kernel ID. \n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the Amazon EC2 User Guide." + description: "The ID of the RAM disk to select. Some kernels require additional drivers\nat launch. Check the kernel requirements for information about whether you\nneed to specify a RAM disk. To find kernel requirements, go to the Amazon\nWeb Services Resource Center and search for the kernel ID.\n\n\nWe recommend that you use PV-GRUB instead of kernels and RAM disks. For more\ninformation, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)\nin the Amazon EC2 User Guide." type: "string" securityGroupIDs: - description: "The IDs of the security groups. You can create a security group using CreateSecurityGroup (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). \n If you specify a network interface, you must specify any security groups as part of the network interface." + description: "The IDs of the security groups. You can create a security group using CreateSecurityGroup\n(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html).\n\n\nIf you specify a network interface, you must specify any security groups\nas part of the network interface." items: type: "string" type: "array" securityGroups: - description: "[EC2-Classic, default VPC] The names of the security groups. For a nondefault VPC, you must use security group IDs instead. \n If you specify a network interface, you must specify any security groups as part of the network interface. \n Default: Amazon EC2 uses the default security group." + description: "[EC2-Classic, default VPC] The names of the security groups. 
For a nondefault\nVPC, you must use security group IDs instead.\n\n\nIf you specify a network interface, you must specify any security groups\nas part of the network interface.\n\n\nDefault: Amazon EC2 uses the default security group." items: type: "string" type: "array" subnetID: - description: "[EC2-VPC] The ID of the subnet to launch the instance into. \n If you specify a network interface, you must specify any subnets as part of the network interface." + description: "[EC2-VPC] The ID of the subnet to launch the instance into.\n\n\nIf you specify a network interface, you must specify any subnets as part\nof the network interface." type: "string" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -392,20 +392,20 @@ spec: type: "object" type: "array" userData: - description: "The user data script to make available to the instance. For more information, see Run commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Run commands on your Windows instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB." + description: "The user data script to make available to the instance. For more information,\nsee Run commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html)\nand Run commands on your Windows instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html).\nIf you are using a command line tool, base64-encoding is performed for you,\nand you can load the text from a file. Otherwise, you must provide base64-encoded\ntext. User data is limited to 16 KB." type: "string" type: "object" status: description: "InstanceStatus defines the observed state of Instance" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -415,22 +415,22 @@ spec: - "region" type: "object" amiLaunchIndex: - description: "The AMI launch index, which can be used to find this instance in the launch group." + description: "The AMI launch index, which can be used to find this instance in the launch\ngroup." format: "int64" type: "integer" architecture: description: "The architecture of the image." type: "string" bootMode: - description: "The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) in the Amazon EC2 User Guide." + description: "The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html)\nin the Amazon EC2 User Guide." type: "string" capacityReservationID: description: "The ID of the Capacity Reservation." type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -488,7 +488,7 @@ spec: description: "Specifies whether enhanced networking with ENA is enabled." type: "boolean" hypervisor: - description: "The hypervisor type of the instance. The value xen is used for both Xen and Nitro hypervisors." + description: "The hypervisor type of the instance. The value xen is used for both Xen and\nNitro hypervisors." type: "string" instanceID: description: "The ID of the instance." @@ -519,10 +519,10 @@ spec: description: "The value is Windows for Windows instances; otherwise blank." type: "string" platformDetails: - description: "The platform details value for the instance. For more information, see AMI billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) in the Amazon EC2 User Guide." + description: "The platform details value for the instance. 
For more information, see AMI\nbilling information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html)\nin the Amazon EC2 User Guide." type: "string" privateDNSName: - description: "(IPv4 only) The private DNS hostname name assigned to the instance. This DNS hostname can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state. \n [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private DNS hostnames if you've enabled DNS resolution and DNS hostnames in your VPC. If you are not using the Amazon-provided DNS server in your VPC, your custom domain name servers must resolve the hostname as appropriate." + description: "(IPv4 only) The private DNS hostname name assigned to the instance. This\nDNS hostname can only be used inside the Amazon EC2 network. This name is\nnot available until the instance enters the running state.\n\n\n[EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private\nDNS hostnames if you've enabled DNS resolution and DNS hostnames in your\nVPC. If you are not using the Amazon-provided DNS server in your VPC, your\ncustom domain name servers must resolve the hostname as appropriate." type: "string" productCodes: description: "The product codes attached to this instance, if applicable." @@ -536,16 +536,16 @@ spec: type: "object" type: "array" publicDNSName: - description: "(IPv4 only) The public DNS name assigned to the instance. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC." + description: "(IPv4 only) The public DNS name assigned to the instance. This name is not\navailable until the instance enters the running state. For EC2-VPC, this\nname is only available if you've enabled DNS hostnames for your VPC." type: "string" publicIPAddress: - description: "The public IPv4 address, or the Carrier IP address assigned to the instance, if applicable. \n A Carrier IP address only applies to an instance launched in a subnet associated with a Wavelength Zone." + description: "The public IPv4 address, or the Carrier IP address assigned to the instance,\nif applicable.\n\n\nA Carrier IP address only applies to an instance launched in a subnet associated\nwith a Wavelength Zone." type: "string" rootDeviceName: description: "The device name of the root device volume (for example, /dev/sda1)." type: "string" rootDeviceType: - description: "The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume." + description: "The root device type used by the AMI. The AMI can use an EBS volume or an\ninstance store volume." type: "string" sourceDestCheck: description: "Indicates whether source/destination checking is enabled." @@ -554,7 +554,7 @@ spec: description: "If the request is a Spot Instance request, the ID of the request." type: "string" sriovNetSupport: - description: "Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled." + description: "Specifies whether enhanced networking with the Intel 82599 Virtual Function\ninterface is enabled." type: "string" state: description: "The current state of the instance." @@ -577,10 +577,10 @@ spec: description: "The reason for the most recent state transition. This might be an empty string." type: "string" tpmSupport: - description: "If the instance is configured for NitroTPM support, the value is v2.0. 
For more information, see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) in the Amazon EC2 User Guide." + description: "If the instance is configured for NitroTPM support, the value is v2.0. For\nmore information, see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html)\nin the Amazon EC2 User Guide." type: "string" usageOperation: - description: "The usage operation value for the instance. For more information, see AMI billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) in the Amazon EC2 User Guide." + description: "The usage operation value for the instance. For more information, see AMI\nbilling information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html)\nin the Amazon EC2 User Guide." type: "string" usageOperationUpdateTime: description: "The time that the usage operation was last updated." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/internetgateways.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/internetgateways.yaml index 2325b9a70..75c97de95 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/internetgateways.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/internetgateways.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "internetgateways.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -23,22 +23,22 @@ spec: description: "InternetGateway is the Schema for the InternetGateways API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "InternetGatewaySpec defines the desired state of InternetGateway. \n Describes an internet gateway." + description: "InternetGatewaySpec defines the desired state of InternetGateway.\n\n\nDescribes an internet gateway." 
properties: routeTableRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -50,7 +50,7 @@ spec: type: "string" type: "array" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -64,10 +64,10 @@ spec: description: "The ID of the VPC." type: "string" vpcRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -78,13 +78,13 @@ spec: description: "InternetGatewayStatus defines the observed state of InternetGateway" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -96,7 +96,7 @@ spec: attachments: description: "Any VPCs attached to the internet gateway." items: - description: "Describes the attachment of a VPC to an internet gateway or an egress-only internet gateway." + description: "Describes the attachment of a VPC to an internet gateway or an egress-only\ninternet gateway." properties: state: type: "string" @@ -105,9 +105,9 @@ spec: type: "object" type: "array" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/natgateways.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/natgateways.yaml index 41989384b..2da2b395e 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/natgateways.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/natgateways.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "natgateways.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,47 +26,47 @@ spec: description: "NATGateway is the Schema for the NATGateways API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "NatGatewaySpec defines the desired state of NatGateway. \n Describes a NAT gateway." + description: "NatGatewaySpec defines the desired state of NatGateway.\n\n\nDescribes a NAT gateway." properties: allocationID: - description: "[Public NAT gateways only] The allocation ID of an Elastic IP address to associate with the NAT gateway. You cannot specify an Elastic IP address with a private NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it." + description: "[Public NAT gateways only] The allocation ID of an Elastic IP address to\nassociate with the NAT gateway. You cannot specify an Elastic IP address\nwith a private NAT gateway. If the Elastic IP address is associated with\nanother resource, you must first disassociate it." type: "string" allocationRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" connectivityType: - description: "Indicates whether the NAT gateway supports public or private connectivity. The default is public connectivity." + description: "Indicates whether the NAT gateway supports public or private connectivity.\nThe default is public connectivity." type: "string" subnetID: description: "The subnet in which to create the NAT gateway." 
type: "string" subnetRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -81,13 +81,13 @@ spec: description: "NATGatewayStatus defines the observed state of NATGateway" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -97,9 +97,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -131,13 +131,13 @@ spec: format: "date-time" type: "string" failureCode: - description: "If the NAT gateway could not be created, specifies the error code for the failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound)" + description: "If the NAT gateway could not be created, specifies the error code for the\nfailure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound\n| Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound)" type: "string" failureMessage: - description: "If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code. \n * For InsufficientFreeAddressesInSubnet: \"Subnet has insufficient free addresses to create this NAT gateway\" \n * For Gateway.NotAttached: \"Network vpc-xxxxxxxx has no Internet gateway attached\" \n * For InvalidAllocationID.NotFound: \"Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway\" \n * For Resource.AlreadyAssociated: \"Elastic IP address eipalloc-xxxxxxxx is already associated\" \n * For InternalError: \"Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again.\" \n * For InvalidSubnetID.NotFound: \"The specified subnet subnet-xxxxxxxx does not exist or could not be found.\"" + description: "If the NAT gateway could not be created, specifies the error message for\nthe failure, that corresponds to the error code.\n\n\n * For InsufficientFreeAddressesInSubnet: \"Subnet has insufficient free\n addresses to create this NAT gateway\"\n\n\n * For Gateway.NotAttached: \"Network vpc-xxxxxxxx has no Internet gateway\n attached\"\n\n\n * For InvalidAllocationID.NotFound: \"Elastic IP address eipalloc-xxxxxxxx\n could not be associated with this NAT gateway\"\n\n\n * For Resource.AlreadyAssociated: \"Elastic IP address eipalloc-xxxxxxxx\n is already associated\"\n\n\n * For InternalError: \"Network interface eni-xxxxxxxx, created and used\n internally by this NAT gateway is in an invalid state. Please try again.\"\n\n\n * For InvalidSubnetID.NotFound: \"The specified subnet subnet-xxxxxxxx\n does not exist or could not be found.\"" type: "string" natGatewayAddresses: - description: "Information about the IP addresses and network interface associated with the NAT gateway." 
+ description: "Information about the IP addresses and network interface associated with\nthe NAT gateway." items: description: "Describes the IP addresses and network interface associated with a NAT gateway." properties: @@ -155,7 +155,7 @@ spec: description: "The ID of the NAT gateway." type: "string" provisionedBandwidth: - description: "Reserved. If you need to sustain traffic greater than the documented limits (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), contact us through the Support Center (https://console.aws.amazon.com/support/home?)." + description: "Reserved. If you need to sustain traffic greater than the documented limits\n(https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html),\ncontact us through the Support Center (https://console.aws.amazon.com/support/home?)." properties: provisionTime: format: "date-time" @@ -171,7 +171,7 @@ spec: type: "string" type: "object" state: - description: "The state of the NAT gateway. \n * pending: The NAT gateway is being created and is not ready to process traffic. \n * failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason. \n * available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway. \n * deleting: The NAT gateway is in the process of being terminated and may still be processing traffic. \n * deleted: The NAT gateway has been terminated and is no longer processing traffic." + description: "The state of the NAT gateway.\n\n\n * pending: The NAT gateway is being created and is not ready to process\n traffic.\n\n\n * failed: The NAT gateway could not be created. Check the failureCode\n and failureMessage fields for the reason.\n\n\n * available: The NAT gateway is able to process traffic. This status remains\n until you delete the NAT gateway, and does not indicate the health of\n the NAT gateway.\n\n\n * deleting: The NAT gateway is in the process of being terminated and\n may still be processing traffic.\n\n\n * deleted: The NAT gateway has been terminated and is no longer processing\n traffic." type: "string" vpcID: description: "The ID of the VPC in which the NAT gateway is located." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/routetables.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/routetables.yaml index 2a2221fa9..9e5094047 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/routetables.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/routetables.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "routetables.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -23,15 +23,15 @@ spec: description: "RouteTable is the Schema for the RouteTables API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "RouteTableSpec defines the desired state of RouteTable. \n Describes a route table." + description: "RouteTableSpec defines the desired state of RouteTable.\n\n\nDescribes a route table." properties: routes: items: @@ -54,7 +54,7 @@ spec: description: "Reference field for GatewayID" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -70,7 +70,7 @@ spec: description: "Reference field for NATGatewayID" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -84,7 +84,7 @@ spec: description: "Reference field for TransitGatewayID" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -96,7 +96,7 @@ spec: description: "Reference field for VPCEndpointID" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -108,7 +108,7 @@ spec: description: "Reference field for VPCPeeringConnectionID" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -117,7 +117,7 @@ spec: type: "object" type: "array" tags: - description: "The tags. 
The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -131,10 +131,10 @@ spec: description: "The ID of the VPC." type: "string" vpcRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -145,13 +145,13 @@ spec: description: "RouteTableStatus defines the observed state of RouteTable" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -166,7 +166,7 @@ spec: description: "Describes an association between a route table and a subnet or gateway." properties: associationState: - description: "Describes the state of an association between a route table and a subnet or gateway." 
+ description: "Describes the state of an association between a route table and a subnet\nor gateway." properties: state: type: "string" @@ -186,9 +186,9 @@ spec: type: "object" type: "array" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/securitygroups.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/securitygroups.yaml index 0d7a931c3..0466b4638 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/securitygroups.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/securitygroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "securitygroups.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -23,18 +23,18 @@ spec: description: "SecurityGroup is the Schema for the SecurityGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "SecurityGroupSpec defines the desired state of SecurityGroup. \n Describes a security group." + description: "SecurityGroupSpec defines the desired state of SecurityGroup.\n\n\nDescribes a security group." 
properties: description: - description: "A description for the security group. This is informational only. \n Constraints: Up to 255 characters in length \n Constraints for EC2-Classic: ASCII characters \n Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*" + description: "A description for the security group. This is informational only.\n\n\nConstraints: Up to 255 characters in length\n\n\nConstraints for EC2-Classic: ASCII characters\n\n\nConstraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*" type: "string" egressRules: items: @@ -80,7 +80,7 @@ spec: type: "integer" userIDGroupPairs: items: - description: "Describes a security group and Amazon Web Services account ID pair. \n We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) in the Amazon Elastic Compute Cloud User Guide." + description: "Describes a security group and Amazon Web Services account ID pair.\n\n\nWe are retiring EC2-Classic on August 15, 2022. We recommend that you migrate\nfrom EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic\nto a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html)\nin the Amazon Elastic Compute Cloud User Guide." properties: description: type: "string" @@ -144,7 +144,7 @@ spec: type: "integer" userIDGroupPairs: items: - description: "Describes a security group and Amazon Web Services account ID pair. \n We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) in the Amazon Elastic Compute Cloud User Guide." + description: "Describes a security group and Amazon Web Services account ID pair.\n\n\nWe are retiring EC2-Classic on August 15, 2022. We recommend that you migrate\nfrom EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic\nto a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html)\nin the Amazon Elastic Compute Cloud User Guide." properties: description: type: "string" @@ -165,10 +165,10 @@ spec: type: "object" type: "array" name: - description: "The name of the security group. \n Constraints: Up to 255 characters in length. Cannot start with sg-. \n Constraints for EC2-Classic: ASCII characters \n Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*" + description: "The name of the security group.\n\n\nConstraints: Up to 255 characters in length. Cannot start with sg-.\n\n\nConstraints for EC2-Classic: ASCII characters\n\n\nConstraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$*" type: "string" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -182,10 +182,10 @@ spec: description: "[EC2-VPC] The ID of the VPC. Required for EC2-VPC." 
type: "string" vpcRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -199,13 +199,13 @@ spec: description: "SecurityGroupStatus defines the observed state of SecurityGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -215,9 +215,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/subnets.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/subnets.yaml index ed10be0b4..66af95b14 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/subnets.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/subnets.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "subnets.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,26 +26,26 @@ spec: description: "Subnet is the Schema for the Subnets API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "SubnetSpec defines the desired state of Subnet. \n Describes a subnet." + description: "SubnetSpec defines the desired state of Subnet.\n\n\nDescribes a subnet." properties: assignIPv6AddressOnCreation: type: "boolean" availabilityZone: - description: "The Availability Zone or Local Zone for the subnet. \n Default: Amazon Web Services selects one for you. 
If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. \n To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) in the Amazon Elastic Compute Cloud User Guide. \n To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN." + description: "The Availability Zone or Local Zone for the subnet.\n\n\nDefault: Amazon Web Services selects one for you. If you create more than\none subnet in your VPC, we do not necessarily select a different zone for\neach subnet.\n\n\nTo create a subnet in a Local Zone, set this value to the Local Zone ID,\nfor example us-west-2-lax-1a. For information about the Regions that support\nLocal Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions)\nin the Amazon Elastic Compute Cloud User Guide.\n\n\nTo create a subnet in an Outpost, set this value to the Availability Zone\nfor the Outpost and specify the Outpost ARN." type: "string" availabilityZoneID: description: "The AZ ID or the Local Zone ID of the subnet." type: "string" cidrBlock: - description: "The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. \n This parameter is not supported for an IPv6 only subnet." + description: "The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24.\nWe modify the specified CIDR block to its canonical form; for example, if\nyou specify 100.68.0.18/18, we modify it to 100.68.0.0/18.\n\n\nThis parameter is not supported for an IPv6 only subnet." type: "string" customerOwnedIPv4Pool: type: "string" @@ -58,7 +58,7 @@ spec: hostnameType: type: "string" ipv6CIDRBlock: - description: "The IPv6 network range for the subnet, in CIDR notation. The subnet size must use a /64 prefix length. \n This parameter is required for an IPv6 only subnet." + description: "The IPv6 network range for the subnet, in CIDR notation. The subnet size\nmust use a /64 prefix length.\n\n\nThis parameter is required for an IPv6 only subnet." type: "string" ipv6Native: description: "Indicates whether to create an IPv6 only subnet." @@ -66,14 +66,14 @@ spec: mapPublicIPOnLaunch: type: "boolean" outpostARN: - description: "The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost ARN, you must also specify the Availability Zone of the Outpost subnet." + description: "The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost\nARN, you must also specify the Availability Zone of the Outpost subnet." 
type: "string" routeTableRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -85,7 +85,7 @@ spec: type: "string" type: "array" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -99,10 +99,10 @@ spec: description: "The ID of the VPC." type: "string" vpcRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -113,13 +113,13 @@ spec: description: "SubnetStatus defines the observed state of Subnet" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -129,13 +129,13 @@ spec: - "region" type: "object" availableIPAddressCount: - description: "The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any stopped instances are considered unavailable." + description: "The number of unused private IPv4 addresses in the subnet. The IPv4 addresses\nfor any stopped instances are considered unavailable." format: "int64" type: "integer" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -162,7 +162,7 @@ spec: description: "Indicates whether this is the default subnet for the Availability Zone." type: "boolean" enableLniAtDeviceIndex: - description: "Indicates the device position for local network interfaces in this subnet. For example, 1 indicates local network interfaces in this subnet are the secondary network interface (eth1)." + description: "Indicates the device position for local network interfaces in this subnet.\nFor example, 1 indicates local network interfaces in this subnet are the\nsecondary network interface (eth1)." format: "int64" type: "integer" ipv6CIDRBlockAssociationSet: @@ -185,13 +185,13 @@ spec: type: "object" type: "array" mapCustomerOwnedIPOnLaunch: - description: "Indicates whether a network interface created in this subnet (including a network interface created by RunInstances) receives a customer-owned IPv4 address." + description: "Indicates whether a network interface created in this subnet (including a\nnetwork interface created by RunInstances) receives a customer-owned IPv4\naddress." type: "boolean" ownerID: description: "The ID of the Amazon Web Services account that owns the subnet." type: "string" privateDNSNameOptionsOnLaunch: - description: "The type of hostnames to assign to instances in the subnet at launch. 
An instance hostname is based on the IPv4 address or ID of the instance." + description: "The type of hostnames to assign to instances in the subnet at launch. An\ninstance hostname is based on the IPv4 address or ID of the instance." properties: enableResourceNameDNSAAAARecord: type: "boolean" diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/transitgateways.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/transitgateways.yaml index 783ad86c9..d053b15dd 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/transitgateways.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/transitgateways.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "transitgateways.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,15 +26,15 @@ spec: description: "TransitGateway is the Schema for the TransitGateways API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "TransitGatewaySpec defines the desired state of TransitGateway. \n Describes a transit gateway." + description: "TransitGatewaySpec defines the desired state of TransitGateway.\n\n\nDescribes a transit gateway." properties: description: description: "A description of the transit gateway." @@ -63,7 +63,7 @@ spec: type: "string" type: "object" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." 
properties: @@ -78,13 +78,13 @@ spec: description: "TransitGatewayStatus defines the observed state of TransitGateway" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -94,9 +94,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcendpoints.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcendpoints.yaml index f8affc334..e690735ea 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcendpoints.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcendpoints.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "vpcendpoints.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,15 +26,15 @@ spec: description: "VPCEndpoint is the Schema for the VPCEndpoints API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "VpcEndpointSpec defines the desired state of VpcEndpoint. \n Describes a VPC endpoint." + description: "VpcEndpointSpec defines the desired state of VpcEndpoint.\n\n\nDescribes a VPC endpoint." properties: dnsOptions: description: "The DNS options for the endpoint." @@ -46,10 +46,10 @@ spec: description: "The IP address type for the endpoint." type: "string" policyDocument: - description: "(Interface and gateway endpoints) A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service." + description: "(Interface and gateway endpoints) A policy to attach to the endpoint that\ncontrols access to the service. The policy must be in valid JSON format.\nIf this parameter is not specified, we attach a default policy that allows\nfull access to the service." type: "string" privateDNSEnabled: - description: "(Interface endpoint) Indicates whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, kinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. 
This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service. \n To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes. \n Default: true" + description: "(Interface endpoint) Indicates whether to associate a private hosted zone\nwith the specified VPC. The private hosted zone contains a record set for\nthe default public DNS name for the service for the Region (for example,\nkinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses\nof the endpoint network interfaces in the VPC. This enables you to make requests\nto the default public DNS name for the service instead of the public DNS\nnames that are automatically generated by the VPC endpoint service.\n\n\nTo use a private hosted zone, you must set the following VPC attributes to\ntrue: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to\nset the VPC attributes.\n\n\nDefault: true" type: "boolean" routeTableIDs: description: "(Gateway endpoint) One or more route table IDs." @@ -58,10 +58,10 @@ spec: type: "array" routeTableRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -69,16 +69,16 @@ spec: type: "object" type: "array" securityGroupIDs: - description: "(Interface endpoint) The ID of one or more security groups to associate with the endpoint network interface." + description: "(Interface endpoint) The ID of one or more security groups to associate with\nthe endpoint network interface." items: type: "string" type: "array" securityGroupRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -86,19 +86,19 @@ spec: type: "object" type: "array" serviceName: - description: "The service name. To get a list of available services, use the DescribeVpcEndpointServices request, or get the name from the service provider." + description: "The service name. 
To get a list of available services, use the DescribeVpcEndpointServices\nrequest, or get the name from the service provider." type: "string" subnetIDs: - description: "(Interface and Gateway Load Balancer endpoints) The ID of one or more subnets in which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you can specify one subnet only." + description: "(Interface and Gateway Load Balancer endpoints) The ID of one or more subnets\nin which to create an endpoint network interface. For a Gateway Load Balancer\nendpoint, you can specify one subnet only." items: type: "string" type: "array" subnetRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -106,7 +106,7 @@ spec: type: "object" type: "array" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -117,16 +117,16 @@ spec: type: "object" type: "array" vpcEndpointType: - description: "The type of endpoint. \n Default: Gateway" + description: "The type of endpoint.\n\n\nDefault: Gateway" type: "string" vpcID: description: "The ID of the VPC in which the endpoint will be used." 
type: "string" vpcRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -139,13 +139,13 @@ spec: description: "VPCEndpointStatus defines the observed state of VPCEndpoint" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -155,9 +155,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -196,7 +196,7 @@ spec: type: "object" type: "array" groups: - description: "(Interface endpoint) Information about the security groups that are associated with the network interface." + description: "(Interface endpoint) Information about the security groups that are associated\nwith the network interface." items: description: "Describes a security group." properties: diff --git a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcs.yaml b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcs.yaml index a4be907d2..1bc4ef749 100644 --- a/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcs.yaml +++ b/crd-catalog/aws-controllers-k8s/ec2-controller/ec2.services.k8s.aws/v1alpha1/vpcs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "vpcs.ec2.services.k8s.aws" spec: group: "ec2.services.k8s.aws" @@ -26,18 +26,18 @@ spec: description: "VPC is the Schema for the VPCS API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "VpcSpec defines the desired state of Vpc. 
\n Describes a VPC." + description: "VpcSpec defines the desired state of Vpc.\n\n\nDescribes a VPC." properties: amazonProvidedIPv6CIDRBlock: - description: "Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block." + description: "Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for\nthe VPC. You cannot specify the range of IP addresses, or the size of the\nCIDR block." type: "boolean" cidrBlocks: items: @@ -50,33 +50,33 @@ spec: description: "The attribute value. The valid values are true or false." type: "boolean" instanceTenancy: - description: "The tenancy options for instances launched into the VPC. For default, instances are launched with shared tenancy by default. You can launch instances with any tenancy into a shared tenancy VPC. For dedicated, instances are launched as dedicated tenancy instances by default. You can only launch instances with a tenancy of dedicated or host into a dedicated tenancy VPC. \n Important: The host value cannot be used with this parameter. Use the default or dedicated values only. \n Default: default" + description: "The tenancy options for instances launched into the VPC. For default, instances\nare launched with shared tenancy by default. You can launch instances with\nany tenancy into a shared tenancy VPC. For dedicated, instances are launched\nas dedicated tenancy instances by default. You can only launch instances\nwith a tenancy of dedicated or host into a dedicated tenancy VPC.\n\n\nImportant: The host value cannot be used with this parameter. Use the default\nor dedicated values only.\n\n\nDefault: default" type: "string" ipv4IPAMPoolID: - description: "The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide." + description: "The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR.\nFor more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html)\nin the Amazon VPC IPAM User Guide." type: "string" ipv4NetmaskLength: - description: "The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide." + description: "The netmask length of the IPv4 CIDR you want to allocate to this VPC from\nan Amazon VPC IP Address Manager (IPAM) pool. For more information about\nIPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html)\nin the Amazon VPC IPAM User Guide." format: "int64" type: "integer" ipv6CIDRBlock: - description: "The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool in the request. \n To let Amazon choose the IPv6 CIDR block for you, omit this parameter." + description: "The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool\nin the request.\n\n\nTo let Amazon choose the IPv6 CIDR block for you, omit this parameter." type: "string" ipv6CIDRBlockNetworkBorderGroup: - description: "The name of the location from which we advertise the IPV6 CIDR block. Use this parameter to limit the address to this location. \n You must set AmazonProvidedIpv6CidrBlock to true to use this parameter." 
+ description: "The name of the location from which we advertise the IPV6 CIDR block. Use\nthis parameter to limit the address to this location.\n\n\nYou must set AmazonProvidedIpv6CidrBlock to true to use this parameter." type: "string" ipv6IPAMPoolID: - description: "The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide." + description: "The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6\nCIDR. IPAM is a VPC feature that you can use to automate your IP address\nmanagement workflows including assigning, tracking, troubleshooting, and\nauditing IP addresses across Amazon Web Services Regions and accounts throughout\nyour Amazon Web Services Organization. For more information, see What is\nIPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html)\nin the Amazon VPC IPAM User Guide." type: "string" ipv6NetmaskLength: - description: "The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide." + description: "The netmask length of the IPv6 CIDR you want to allocate to this VPC from\nan Amazon VPC IP Address Manager (IPAM) pool. For more information about\nIPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html)\nin the Amazon VPC IPAM User Guide." format: "int64" type: "integer" ipv6Pool: description: "The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block." type: "string" tags: - description: "The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string." + description: "The tags. The value parameter is required, but if you don't want the tag\nto have a value, specify the parameter with no value, and we set the value\nto an empty string." items: description: "Describes a tag." properties: @@ -93,13 +93,13 @@ spec: description: "VPCStatus defines the observed state of VPC" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -128,9 +128,9 @@ spec: type: "object" type: "array" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/pullthroughcacherules.yaml b/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/pullthroughcacherules.yaml index ebe912c26..0d7a89d37 100644 --- a/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/pullthroughcacherules.yaml +++ b/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/pullthroughcacherules.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "pullthroughcacherules.ecr.services.k8s.aws" spec: group: "ecr.services.k8s.aws" @@ -19,24 +19,24 @@ spec: description: "PullThroughCacheRule is the Schema for the PullThroughCacheRules API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "PullThroughCacheRuleSpec defines the desired state of PullThroughCacheRule. \n The details of a pull through cache rule." + description: "PullThroughCacheRuleSpec defines the desired state of PullThroughCacheRule.\n\n\nThe details of a pull through cache rule." properties: ecrRepositoryPrefix: description: "The repository name prefix to use when caching images from the source registry." type: "string" registryID: - description: "The Amazon Web Services account ID associated with the registry to create the pull through cache rule for. If you do not specify a registry, the default registry is assumed." + description: "The Amazon Web Services account ID associated with the registry to create\nthe pull through cache rule for. If you do not specify a registry, the default\nregistry is assumed." type: "string" upstreamRegistryURL: - description: "The registry URL of the upstream public registry to use as the source for the pull through cache rule." + description: "The registry URL of the upstream public registry to use as the source for\nthe pull through cache rule." type: "string" required: - "ecrRepositoryPrefix" @@ -46,13 +46,13 @@ spec: description: "PullThroughCacheRuleStatus defines the observed state of PullThroughCacheRule" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -62,9 +62,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -88,7 +88,7 @@ spec: type: "object" type: "array" createdAt: - description: "The date and time, in JavaScript date format, when the pull through cache rule was created." + description: "The date and time, in JavaScript date format, when the pull through cache\nrule was created." format: "date-time" type: "string" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/repositories.yaml b/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/repositories.yaml index fbc8d1a11..c0be50788 100644 --- a/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/repositories.yaml +++ b/crd-catalog/aws-controllers-k8s/ecr-controller/ecr.services.k8s.aws/v1alpha1/repositories.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "repositories.ecr.services.k8s.aws" spec: group: "ecr.services.k8s.aws" @@ -29,18 +29,18 @@ spec: description: "Repository is the Schema for the Repositories API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "RepositorySpec defines the desired state of Repository. \n An object representing a repository." + description: "RepositorySpec defines the desired state of Repository.\n\n\nAn object representing a repository." properties: encryptionConfiguration: - description: "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest." + description: "The encryption configuration for the repository. This determines how the\ncontents of your repository are encrypted at rest." properties: encryptionType: type: "string" @@ -48,30 +48,30 @@ spec: type: "string" type: "object" imageScanningConfiguration: - description: "The image scanning configuration for the repository. This determines whether images are scanned for known vulnerabilities after being pushed to the repository." + description: "The image scanning configuration for the repository. This determines whether\nimages are scanned for known vulnerabilities after being pushed to the repository." properties: scanOnPush: type: "boolean" type: "object" imageTagMutability: - description: "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten." + description: "The tag mutability setting for the repository. If this parameter is omitted,\nthe default setting of MUTABLE will be used which will allow image tags to\nbe overwritten. If IMMUTABLE is specified, all image tags within the repository\nwill be immutable which will prevent them from being overwritten." type: "string" lifecyclePolicy: description: "The JSON repository policy text to apply to the repository." type: "string" name: - description: "The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app)." + description: "The name to use for the repository. The repository name may be specified\non its own (such as nginx-web-app) or it can be prepended with a namespace\nto group the repository into a category (such as project-a/nginx-web-app)." type: "string" policy: - description: "The JSON repository policy text to apply to the repository. 
For more information, see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) in the Amazon Elastic Container Registry User Guide." + description: "The JSON repository policy text to apply to the repository. For more information,\nsee Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html)\nin the Amazon Elastic Container Registry User Guide." type: "string" registryID: - description: "The Amazon Web Services account ID associated with the registry to create the repository. If you do not specify a registry, the default registry is assumed." + description: "The Amazon Web Services account ID associated with the registry to create\nthe repository. If you do not specify a registry, the default registry is\nassumed." type: "string" tags: - description: "The metadata that you apply to the repository to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters." + description: "The metadata that you apply to the repository to help you categorize and\norganize them. Each tag consists of a key and an optional value, both of\nwhich you define. Tag keys can have a maximum character length of 128 characters,\nand tag values can have a maximum length of 256 characters." items: - description: "The metadata to apply to a resource to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters." + description: "The metadata to apply to a resource to help you categorize and organize them.\nEach tag consists of a key and a value, both of which you define. Tag keys\ncan have a maximum character length of 128 characters, and tag values can\nhave a maximum length of 256 characters." properties: key: type: "string" @@ -86,13 +86,13 @@ spec: description: "RepositoryStatus defines the observed state of Repository" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -102,9 +102,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -132,7 +132,7 @@ spec: format: "date-time" type: "string" repositoryURI: - description: "The URI for the repository. You can use this URI for container image push and pull operations." + description: "The URI for the repository. You can use this URI for container image push\nand pull operations." type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/jobruns.yaml b/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/jobruns.yaml index c75d57664..9d82bbccc 100644 --- a/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/jobruns.yaml +++ b/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/jobruns.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "jobruns.emrcontainers.services.k8s.aws" spec: group: "emrcontainers.services.k8s.aws" @@ -26,15 +26,15 @@ spec: description: "JobRun is the Schema for the JobRuns API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "JobRunSpec defines the desired state of JobRun. \n This entity describes a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS." + description: "JobRunSpec defines the desired state of JobRun.\n\n\nThis entity describes a job run. A job run is a unit of work, such as a Spark\njar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on\nEKS." properties: configurationOverrides: type: "string" @@ -72,10 +72,10 @@ spec: description: "The virtual cluster ID for which the job run request is submitted." type: "string" virtualClusterRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -90,13 +90,13 @@ spec: description: "JobRunStatus defines the observed state of JobRun" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -106,9 +106,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/virtualclusters.yaml b/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/virtualclusters.yaml index b5faba97a..f37410584 100644 --- a/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/virtualclusters.yaml +++ b/crd-catalog/aws-controllers-k8s/emrcontainers-controller/emrcontainers.services.k8s.aws/v1alpha1/virtualclusters.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "virtualclusters.emrcontainers.services.k8s.aws" spec: group: "emrcontainers.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "VirtualCluster is the Schema for the VirtualClusters API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "VirtualClusterSpec defines the desired state of VirtualCluster. \n This entity describes a virtual cluster. A virtual cluster is a Kubernetes namespace that Amazon EMR is registered with. Amazon EMR uses virtual clusters to run jobs and host endpoints. Multiple virtual clusters can be backed by the same physical cluster. However, each virtual cluster maps to one namespace on an EKS cluster. Virtual clusters do not create any active resources that contribute to your bill or that require lifecycle management outside the service." + description: "VirtualClusterSpec defines the desired state of VirtualCluster.\n\n\nThis entity describes a virtual cluster. A virtual cluster is a Kubernetes\nnamespace that Amazon EMR is registered with. Amazon EMR uses virtual clusters\nto run jobs and host endpoints. Multiple virtual clusters can be backed by\nthe same physical cluster. However, each virtual cluster maps to one namespace\non an EKS cluster. Virtual clusters do not create any active resources that\ncontribute to your bill or that require lifecycle management outside the\nservice." properties: containerProvider: description: "The container provider of the virtual cluster." @@ -63,13 +63,13 @@ spec: description: "VirtualClusterStatus defines the observed state of VirtualCluster" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -79,9 +79,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/groups.yaml b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/groups.yaml index 200d4d77d..487138751 100644 --- a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/groups.yaml +++ b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/groups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "groups.iam.services.k8s.aws" spec: group: "iam.services.k8s.aws" @@ -19,25 +19,25 @@ spec: description: "Group is the Schema for the Groups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "GroupSpec defines the desired state of Group. \n Contains information about an IAM group entity. \n This data type is used as a response element in the following operations: \n * CreateGroup \n * GetGroup \n * ListGroups" + description: "GroupSpec defines the desired state of Group.\n\n\nContains information about an IAM group entity.\n\n\nThis data type is used as a response element in the following operations:\n\n\n * CreateGroup\n\n\n * GetGroup\n\n\n * ListGroups" properties: inlinePolicies: additionalProperties: type: "string" type: "object" name: - description: "The name of the group to create. Do not include the path in this value. \n IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both \"MyResource\" and \"myresource\"." + description: "The name of the group to create. Do not include the path in this value.\n\n\nIAM user, group, role, and policy names must be unique within the account.\nNames are not distinguished by case. For example, you cannot create resources\nnamed both \"MyResource\" and \"myresource\"." type: "string" path: - description: "The path to the group. For more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. \n This parameter is optional. If it is not included, it defaults to a slash (/). \n This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters." + description: "The path to the group. For more information about paths, see IAM identifiers\n(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide.\n\n\nThis parameter is optional. If it is not included, it defaults to a slash\n(/).\n\n\nThis parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))\na string of characters consisting of either a forward slash (/) by itself\nor a string that must begin and end with forward slashes. In addition, it\ncan contain any ASCII character from the ! (\\u0021) through the DEL character\n(\\u007F), including most punctuation characters, digits, and upper and lowercased\nletters." 
type: "string" policies: items: @@ -45,10 +45,10 @@ spec: type: "array" policyRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -62,13 +62,13 @@ spec: description: "GroupStatus defines the observed state of Group" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -78,9 +78,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -104,11 +104,11 @@ spec: type: "object" type: "array" createDate: - description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the group was created." + description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),\nwhen the group was created." format: "date-time" type: "string" groupID: - description: "The stable and unique string identifying the group. For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide." + description: "The stable and unique string identifying the group. For more information\nabout IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide." type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/policies.yaml b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/policies.yaml index a91cd7f51..9ef75b921 100644 --- a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/policies.yaml +++ b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/policies.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "policies.iam.services.k8s.aws" spec: group: "iam.services.k8s.aws" @@ -19,32 +19,32 @@ spec: description: "Policy is the Schema for the Policies API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "PolicySpec defines the desired state of Policy. \n Contains information about a managed policy. \n This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations. \n For more information about managed policies, refer to Managed policies and inline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) in the IAM User Guide." + description: "PolicySpec defines the desired state of Policy.\n\n\nContains information about a managed policy.\n\n\nThis data type is used as a response element in the CreatePolicy, GetPolicy,\nand ListPolicies operations.\n\n\nFor more information about managed policies, refer to Managed policies and\ninline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)\nin the IAM User Guide." properties: description: - description: "A friendly description of the policy. \n Typically used to store information about the permissions defined in the policy. For example, \"Grants access to production DynamoDB tables.\" \n The policy description is immutable. After a value is assigned, it cannot be changed." + description: "A friendly description of the policy.\n\n\nTypically used to store information about the permissions defined in the\npolicy. For example, \"Grants access to production DynamoDB tables.\"\n\n\nThe policy description is immutable. After a value is assigned, it cannot\nbe changed." type: "string" name: - description: "The friendly name of the policy. \n IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both \"MyResource\" and \"myresource\"." + description: "The friendly name of the policy.\n\n\nIAM user, group, role, and policy names must be unique within the account.\nNames are not distinguished by case. For example, you cannot create resources\nnamed both \"MyResource\" and \"myresource\"." type: "string" path: - description: "The path for the policy. \n For more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. \n This parameter is optional. If it is not included, it defaults to a slash (/). \n This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters. \n You cannot use an asterisk (*) in the path name." + description: "The path for the policy.\n\n\nFor more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide.\n\n\nThis parameter is optional. 
If it is not included, it defaults to a slash\n(/).\n\n\nThis parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))\na string of characters consisting of either a forward slash (/) by itself\nor a string that must begin and end with forward slashes. In addition, it\ncan contain any ASCII character from the ! (\\u0021) through the DEL character\n(\\u007F), including most punctuation characters, digits, and upper and lowercased\nletters.\n\n\nYou cannot use an asterisk (*) in the path name." type: "string" policyDocument: - description: "The JSON policy document that you want to use as the content for the new policy. \n You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM. \n The maximum length of the policy document that you can pass in this operation, including whitespace, is listed below. To view the maximum character counts of a managed policy with no whitespaces, see IAM and STS character quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entity-length). \n To learn more about JSON policy grammar, see Grammar of the IAM JSON policy language (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html) in the IAM User Guide. \n The regex pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of characters consisting of the following: \n * Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range \n * The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF) \n * The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)" + description: "The JSON policy document that you want to use as the content for the new\npolicy.\n\n\nYou must provide policies in JSON format in IAM. However, for CloudFormation\ntemplates formatted in YAML, you can provide the policy in JSON or YAML format.\nCloudFormation always converts a YAML policy to JSON format before submitting\nit to IAM.\n\n\nThe maximum length of the policy document that you can pass in this operation,\nincluding whitespace, is listed below. To view the maximum character counts\nof a managed policy with no whitespaces, see IAM and STS character quotas\n(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entity-length).\n\n\nTo learn more about JSON policy grammar, see Grammar of the IAM JSON policy\nlanguage (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html)\nin the IAM User Guide.\n\n\nThe regex pattern (http://wikipedia.org/wiki/regex) used to validate this\nparameter is a string of characters consisting of the following:\n\n\n * Any printable ASCII character ranging from the space character (\\u0020)\n through the end of the ASCII character range\n\n\n * The printable characters in the Basic Latin and Latin-1 Supplement character\n set (through \\u00FF)\n\n\n * The special characters tab (\\u0009), line feed (\\u000A), and carriage\n return (\\u000D)" type: "string" tags: - description: "A list of tags that you want to attach to the new IAM customer managed policy. Each tag consists of a key name and an associated value. 
For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. \n If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created." + description: "A list of tags that you want to attach to the new IAM customer managed policy.\nEach tag consists of a key name and an associated value. For more information\nabout tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)\nin the IAM User Guide.\n\n\nIf any one of the tags is invalid or if you exceed the allowed maximum number\nof tags, then the entire request fails and the resource is not created." items: - description: "A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide." + description: "A structure that represents user-provided metadata that can be associated\nwith an IAM resource. For more information about tagging, see Tagging IAM\nresources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)\nin the IAM User Guide." properties: key: type: "string" @@ -60,13 +60,13 @@ spec: description: "PolicyStatus defines the observed state of Policy" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -76,13 +76,13 @@ spec: - "region" type: "object" attachmentCount: - description: "The number of entities (users, groups, and roles) that the policy is attached to." 
+ description: "The number of entities (users, groups, and roles) that the policy is attached\nto." format: "int64" type: "integer" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -106,7 +106,7 @@ spec: type: "object" type: "array" createDate: - description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the policy was created." + description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),\nwhen the policy was created." format: "date-time" type: "string" defaultVersionID: @@ -116,14 +116,14 @@ spec: description: "Specifies whether the policy can be attached to an IAM user, group, or role." type: "boolean" permissionsBoundaryUsageCount: - description: "The number of entities (users and roles) for which the policy is used to set the permissions boundary. \n For more information about permissions boundaries, see Permissions boundaries for IAM identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) in the IAM User Guide." + description: "The number of entities (users and roles) for which the policy is used to\nset the permissions boundary.\n\n\nFor more information about permissions boundaries, see Permissions boundaries\nfor IAM identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)\nin the IAM User Guide." format: "int64" type: "integer" policyID: - description: "The stable and unique string identifying the policy. \n For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide." + description: "The stable and unique string identifying the policy.\n\n\nFor more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide." type: "string" updateDate: - description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the policy was last updated. \n When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created." + description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),\nwhen the policy was last updated.\n\n\nWhen a policy has only one version, this field contains the date and time\nwhen the policy was created. When a policy has more than one version, this\nfield contains the date and time when the most recent policy version was\ncreated." 
format: "date-time" type: "string" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/roles.yaml b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/roles.yaml index f82dc9b1f..b4957caff 100644 --- a/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/roles.yaml +++ b/crd-catalog/aws-controllers-k8s/iam-controller/iam.services.k8s.aws/v1alpha1/roles.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "roles.iam.services.k8s.aws" spec: group: "iam.services.k8s.aws" @@ -19,18 +19,18 @@ spec: description: "Role is the Schema for the Roles API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "RoleSpec defines the desired state of Role. \n Contains information about an IAM role. This structure is returned as a response element in several API operations that interact with roles." + description: "RoleSpec defines the desired state of Role.\n\n\nContains information about an IAM role. This structure is returned as a response\nelement in several API operations that interact with roles." properties: assumeRolePolicyDocument: - description: "The trust relationship policy document that grants an entity permission to assume the role. \n In IAM, you must provide a JSON policy that has been converted to a string. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM. \n The regex pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of characters consisting of the following: \n * Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range \n * The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF) \n * The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) \n Upon success, the response includes the same trust policy in JSON format." 
+ description: "The trust relationship policy document that grants an entity permission to\nassume the role.\n\n\nIn IAM, you must provide a JSON policy that has been converted to a string.\nHowever, for CloudFormation templates formatted in YAML, you can provide\nthe policy in JSON or YAML format. CloudFormation always converts a YAML\npolicy to JSON format before submitting it to IAM.\n\n\nThe regex pattern (http://wikipedia.org/wiki/regex) used to validate this\nparameter is a string of characters consisting of the following:\n\n\n * Any printable ASCII character ranging from the space character (\\u0020)\n through the end of the ASCII character range\n\n\n * The printable characters in the Basic Latin and Latin-1 Supplement character\n set (through \\u00FF)\n\n\n * The special characters tab (\\u0009), line feed (\\u000A), and carriage\n return (\\u000D)\n\n\nUpon success, the response includes the same trust policy in JSON format." type: "string" description: description: "A description of the role." @@ -40,23 +40,23 @@ spec: type: "string" type: "object" maxSessionDuration: - description: "The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default value of one hour is applied. This setting can have a value from 1 hour to 12 hours. \n Anyone who assumes the role from the CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM User Guide." + description: "The maximum session duration (in seconds) that you want to set for the specified\nrole. If you do not specify a value for this setting, the default value of\none hour is applied. This setting can have a value from 1 hour to 12 hours.\n\n\nAnyone who assumes the role from the CLI or API can use the DurationSeconds\nAPI parameter or the duration-seconds CLI parameter to request a longer session.\nThe MaxSessionDuration setting determines the maximum duration that can be\nrequested using the DurationSeconds parameter. If users don't specify a value\nfor the DurationSeconds parameter, their security credentials are valid for\none hour by default. This applies when you use the AssumeRole* API operations\nor the assume-role* CLI operations but does not apply when you use those\noperations to create a console URL. For more information, see Using IAM roles\n(https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the\nIAM User Guide." format: "int64" type: "integer" name: - description: "The name of the role to create. \n IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both \"MyResource\" and \"myresource\". \n This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. 
You can also include any of the following characters: _+=,.@-" + description: "The name of the role to create.\n\n\nIAM user, group, role, and policy names must be unique within the account.\nNames are not distinguished by case. For example, you cannot create resources\nnamed both \"MyResource\" and \"myresource\".\n\n\nThis parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))\na string of characters consisting of upper and lowercase alphanumeric characters\nwith no spaces. You can also include any of the following characters: _+=,.@-" type: "string" path: - description: "The path to the role. For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. \n This parameter is optional. If it is not included, it defaults to a slash (/). \n This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters." + description: "The path to the role. For more information about paths, see IAM Identifiers\n(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide.\n\n\nThis parameter is optional. If it is not included, it defaults to a slash\n(/).\n\n\nThis parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))\na string of characters consisting of either a forward slash (/) by itself\nor a string that must begin and end with forward slashes. In addition, it\ncan contain any ASCII character from the ! (\\u0021) through the DEL character\n(\\u007F), including most punctuation characters, digits, and upper and lowercased\nletters." type: "string" permissionsBoundary: - description: "The ARN of the managed policy that is used to set the permissions boundary for the role. \n A permissions boundary policy defines the maximum permissions that identity-based policies can grant to an entity, but does not grant permissions. Permissions boundaries do not define the maximum permissions that a resource-based policy can grant to an entity. To learn more, see Permissions boundaries for IAM entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) in the IAM User Guide. \n For more information about policy types, see Policy types (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) in the IAM User Guide." + description: "The ARN of the managed policy that is used to set the permissions boundary\nfor the role.\n\n\nA permissions boundary policy defines the maximum permissions that identity-based\npolicies can grant to an entity, but does not grant permissions. Permissions\nboundaries do not define the maximum permissions that a resource-based policy\ncan grant to an entity. To learn more, see Permissions boundaries for IAM\nentities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)\nin the IAM User Guide.\n\n\nFor more information about policy types, see Policy types (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types)\nin the IAM User Guide." 
type: "string" permissionsBoundaryRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -68,10 +68,10 @@ spec: type: "array" policyRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -79,9 +79,9 @@ spec: type: "object" type: "array" tags: - description: "A list of tags that you want to attach to the new role. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. \n If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created." + description: "A list of tags that you want to attach to the new role. Each tag consists\nof a key name and an associated value. For more information about tagging,\nsee Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)\nin the IAM User Guide.\n\n\nIf any one of the tags is invalid or if you exceed the allowed maximum number\nof tags, then the entire request fails and the resource is not created." items: - description: "A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide." + description: "A structure that represents user-provided metadata that can be associated\nwith an IAM resource. For more information about tagging, see Tagging IAM\nresources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)\nin the IAM User Guide." 
properties: key: type: "string" @@ -97,13 +97,13 @@ spec: description: "RoleStatus defines the observed state of Role" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -113,9 +113,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -139,14 +139,14 @@ spec: type: "object" type: "array" createDate: - description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the role was created." + description: "The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),\nwhen the role was created." format: "date-time" type: "string" roleID: - description: "The stable and unique string identifying the role. 
For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide." + description: "The stable and unique string identifying the role. For more information about\nIDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)\nin the IAM User Guide." type: "string" roleLastUsed: - description: "Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) in the IAM user Guide." + description: "Contains information about the last time that an IAM role was used. This\nincludes the date and time and the Region in which the role was last used.\nActivity is only reported for the trailing 400 days. This period can be shorter\nif your Region began supporting these features within the last year. The\nrole might have been used more than 400 days ago. For more information, see\nRegions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period)\nin the IAM user Guide." properties: lastUsedDate: format: "date-time" diff --git a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/aliases.yaml b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/aliases.yaml index fb064fd7c..98f05a023 100644 --- a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/aliases.yaml +++ b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/aliases.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "aliases.kms.services.k8s.aws" spec: group: "kms.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Alias is the Schema for the Aliases API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,16 +30,16 @@ spec: description: "AliasSpec defines the desired state of Alias." properties: name: - description: "Specifies the alias name. This value must begin with alias/ followed by a name, such as alias/ExampleAlias. \n The AliasName value must be string of 1-256 characters. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)." + description: "Specifies the alias name. This value must begin with alias/ followed by a\nname, such as alias/ExampleAlias.\n\n\nThe AliasName value must be string of 1-256 characters. It can contain only\nalphanumeric characters, forward slashes (/), underscores (_), and dashes\n(-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is\nreserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)." type: "string" targetKeyID: - description: "Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). The KMS key must be in the same Amazon Web Services Region. \n A valid key ID is required. If you supply a null or empty string value, this operation returns an error. \n For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) in the Key Management Service Developer Guide . \n Specify the key ID or key ARN of the KMS key. \n For example: \n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab \n * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab \n To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey." + description: "Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).\nThe KMS key must be in the same Amazon Web Services Region.\n\n\nA valid key ID is required. If you supply a null or empty string value, this\noperation returns an error.\n\n\nFor help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn)\nin the Key Management Service Developer Guide .\n\n\nSpecify the key ID or key ARN of the KMS key.\n\n\nFor example:\n\n\n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n\n\n * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n\n\nTo get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey." 
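# Illustrative sketch, not part of the patch: a minimal Alias custom resource
# for the AliasSpec above. The alias name and the referenced Key resource name
# are hypothetical placeholders.
apiVersion: "kms.services.k8s.aws/v1alpha1"
kind: "Alias"
metadata:
  name: "example-alias"
spec:
  name: "alias/ExampleAlias"
  targetKeyRef:
    from:
      name: "example-key"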
type: "string" targetKeyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -52,13 +52,13 @@ spec: description: "AliasStatus defines the observed state of Alias" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -68,9 +68,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/grants.yaml b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/grants.yaml index 9978cfffa..d5368378b 100644 --- a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/grants.yaml +++ b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/grants.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "grants.kms.services.k8s.aws" spec: group: "kms.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Grant is the Schema for the Grants API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,7 +30,7 @@ spec: description: "GrantSpec defines the desired state of Grant." properties: constraints: - description: "Specifies a grant constraint. \n KMS supports the EncryptionContextEquals and EncryptionContextSubset grant constraints. Each constraint value can include up to 8 encryption context pairs. The encryption context value in each constraint cannot exceed 384 characters. 
For information about grant constraints, see Using grant constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) in the Key Management Service Developer Guide. For more information about encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the Key Management Service Developer Guide . \n The encryption context grant constraints allow the permissions in the grant only when the encryption context in the request matches (EncryptionContextEquals) or includes (EncryptionContextSubset) the encryption context specified in this structure. \n The encryption context grant constraints are supported only on grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) that include an EncryptionContext parameter, such as cryptographic operations on symmetric encryption KMS keys. Grants with grant constraints can include the DescribeKey and RetireGrant operations, but the constraint doesn't apply to these operations. If a grant with a grant constraint includes the CreateGrant operation, the constraint requires that any grants created with the CreateGrant permission have an equally strict or stricter encryption context constraint. \n You cannot use an encryption context grant constraint for cryptographic operations with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption context." + description: "Specifies a grant constraint.\n\n\nKMS supports the EncryptionContextEquals and EncryptionContextSubset grant\nconstraints. Each constraint value can include up to 8 encryption context\npairs. The encryption context value in each constraint cannot exceed 384\ncharacters. For information about grant constraints, see Using grant constraints\n(https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints)\nin the Key Management Service Developer Guide. For more information about\nencryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)\nin the Key Management Service Developer Guide .\n\n\nThe encryption context grant constraints allow the permissions in the grant\nonly when the encryption context in the request matches (EncryptionContextEquals)\nor includes (EncryptionContextSubset) the encryption context specified in\nthis structure.\n\n\nThe encryption context grant constraints are supported only on grant operations\n(https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations)\nthat include an EncryptionContext parameter, such as cryptographic operations\non symmetric encryption KMS keys. Grants with grant constraints can include\nthe DescribeKey and RetireGrant operations, but the constraint doesn't apply\nto these operations. If a grant with a grant constraint includes the CreateGrant\noperation, the constraint requires that any grants created with the CreateGrant\npermission have an equally strict or stricter encryption context constraint.\n\n\nYou cannot use an encryption context grant constraint for cryptographic operations\nwith asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption\ncontext." properties: encryptionContextEquals: additionalProperties: @@ -42,36 +42,36 @@ spec: type: "object" type: "object" grantTokens: - description: "A list of grant tokens. 
\n Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) in the Key Management Service Developer Guide." + description: "A list of grant tokens.\n\n\nUse a grant token when your permission to call this operation comes from\na new grant that has not yet achieved eventual consistency. For more information,\nsee Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)\nand Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)\nin the Key Management Service Developer Guide." items: type: "string" type: "array" granteePrincipal: - description: "The identity that gets the permissions specified in the grant. \n To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of an Amazon Web Services principal. Valid Amazon Web Services principals include Amazon Web Services accounts (root), IAM users, IAM roles, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) in the Example ARNs section of the Amazon Web Services General Reference." + description: "The identity that gets the permissions specified in the grant.\n\n\nTo specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)\nof an Amazon Web Services principal. Valid Amazon Web Services principals\ninclude Amazon Web Services accounts (root), IAM users, IAM roles, federated\nusers, and assumed role users. For examples of the ARN syntax to use for\nspecifying a principal, see Amazon Web Services Identity and Access Management\n(IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)\nin the Example ARNs section of the Amazon Web Services General Reference." type: "string" keyID: - description: "Identifies the KMS key for the grant. The grant gives principals permission to use this KMS key. \n Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN. \n For example: \n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab \n * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab \n To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey." + description: "Identifies the KMS key for the grant. The grant gives principals permission\nto use this KMS key.\n\n\nSpecify the key ID or key ARN of the KMS key. To specify a KMS key in a different\nAmazon Web Services account, you must use the key ARN.\n\n\nFor example:\n\n\n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n\n\n * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n\n\nTo get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey." 
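# Illustrative sketch, not part of the patch: a minimal Grant custom resource
# for the GrantSpec above. The grantee principal ARN, referenced Key name,
# operations list, and encryption-context constraint are hypothetical
# placeholders.
apiVersion: "kms.services.k8s.aws/v1alpha1"
kind: "Grant"
metadata:
  name: "example-grant"
spec:
  granteePrincipal: "arn:aws:iam::111122223333:role/example-app-role"
  keyRef:
    from:
      name: "example-key"
  operations:
    - "Encrypt"
    - "Decrypt"
    - "GenerateDataKey"
  constraints:
    encryptionContextSubset:
      department: "example"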
type: "string" keyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" name: - description: "A friendly name for the grant. Use this value to prevent the unintended creation of duplicate grants when retrying this request. \n When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request. \n When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens for the same grant ID can be used interchangeably." + description: "A friendly name for the grant. Use this value to prevent the unintended creation\nof duplicate grants when retrying this request.\n\n\nWhen this value is absent, all CreateGrant requests result in a new grant\nwith a unique GrantId even if all the supplied parameters are identical.\nThis can result in unintended duplicates when you retry the CreateGrant request.\n\n\nWhen this value is present, you can retry a CreateGrant request with identical\nparameters; if the grant already exists, the original GrantId is returned\nwithout creating a new grant. Note that the returned grant token is unique\nwith every CreateGrant request, even when a duplicate GrantId is returned.\nAll grant tokens for the same grant ID can be used interchangeably." type: "string" operations: - description: "A list of operations that the grant permits. \n This list must include only operations that are permitted in a grant. Also, the operation must be supported on the KMS key. For example, you cannot create a grant for a symmetric encryption KMS key that allows the Sign operation, or a grant for an asymmetric KMS key that allows the GenerateDataKey operation. If you try, KMS returns a ValidationError exception. For details, see Grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) in the Key Management Service Developer Guide." + description: "A list of operations that the grant permits.\n\n\nThis list must include only operations that are permitted in a grant. Also,\nthe operation must be supported on the KMS key. For example, you cannot create\na grant for a symmetric encryption KMS key that allows the Sign operation,\nor a grant for an asymmetric KMS key that allows the GenerateDataKey operation.\nIf you try, KMS returns a ValidationError exception. 
For details, see Grant\noperations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations)\nin the Key Management Service Developer Guide." items: type: "string" type: "array" retiringPrincipal: - description: "The principal that has permission to use the RetireGrant operation to retire the grant. \n To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of an Amazon Web Services principal. Valid Amazon Web Services principals include Amazon Web Services accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) in the Example ARNs section of the Amazon Web Services General Reference. \n The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) in the Key Management Service Developer Guide." + description: "The principal that has permission to use the RetireGrant operation to retire\nthe grant.\n\n\nTo specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)\nof an Amazon Web Services principal. Valid Amazon Web Services principals\ninclude Amazon Web Services accounts (root), IAM users, federated users,\nand assumed role users. For examples of the ARN syntax to use for specifying\na principal, see Amazon Web Services Identity and Access Management (IAM)\n(https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)\nin the Example ARNs section of the Amazon Web Services General Reference.\n\n\nThe grant determines the retiring principal. Other principals might have\npermission to retire the grant or revoke the grant. For details, see RevokeGrant\nand Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)\nin the Key Management Service Developer Guide." type: "string" required: - "granteePrincipal" @@ -81,13 +81,13 @@ spec: description: "GrantStatus defines the observed state of Grant" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -97,9 +97,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -123,10 +123,10 @@ spec: type: "object" type: "array" grantID: - description: "The unique identifier for the grant. \n You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation." + description: "The unique identifier for the grant.\n\n\nYou can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation." type: "string" grantToken: - description: "The grant token. \n Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) in the Key Management Service Developer Guide." + description: "The grant token.\n\n\nUse a grant token when your permission to call this operation comes from\na new grant that has not yet achieved eventual consistency. For more information,\nsee Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)\nand Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)\nin the Key Management Service Developer Guide." 
type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/keys.yaml b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/keys.yaml index acd0f5c4b..97ecf9cb9 100644 --- a/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/keys.yaml +++ b/crd-catalog/aws-controllers-k8s/kms-controller/kms.services.k8s.aws/v1alpha1/keys.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "keys.kms.services.k8s.aws" spec: group: "kms.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Key is the Schema for the Keys API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,35 +30,35 @@ spec: description: "KeySpec defines the desired state of Key." properties: bypassPolicyLockoutSafetyCheck: - description: "A flag to indicate whether to bypass the key policy lockout safety check. \n Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. \n For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the Key Management Service Developer Guide . \n Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the KMS key. \n The default value is false." 
+ description: "A flag to indicate whether to bypass the key policy lockout safety check.\n\n\nSetting this value to true increases the risk that the KMS key becomes unmanageable.\nDo not set this value to true indiscriminately.\n\n\nFor more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)\nsection in the Key Management Service Developer Guide .\n\n\nUse this parameter only when you include a policy in the request and you\nintend to prevent the principal that is making the request from making a\nsubsequent PutKeyPolicy request on the KMS key.\n\n\nThe default value is false." type: "boolean" customKeyStoreID: - description: "Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) and the key material in its associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs, each in a different Availability Zone in the Region. \n This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store. \n To find the ID of a custom key store, use the DescribeCustomKeyStores operation. \n The response includes the custom key store ID and the ID of the CloudHSM cluster. \n This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store." + description: "Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)\nand the key material in its associated CloudHSM cluster. To create a KMS\nkey in a custom key store, you must also specify the Origin parameter with\na value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the\ncustom key store must have at least two active HSMs, each in a different\nAvailability Zone in the Region.\n\n\nThis parameter is valid only for symmetric encryption KMS keys in a single\nRegion. You cannot create any other type of KMS key in a custom key store.\n\n\nTo find the ID of a custom key store, use the DescribeCustomKeyStores operation.\n\n\nThe response includes the custom key store ID and the ID of the CloudHSM\ncluster.\n\n\nThis operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)\nfeature in KMS, which combines the convenience and extensive integration\nof KMS with the isolation and control of a single-tenant key store." type: "string" description: - description: "A description of the KMS key. \n Use a description that helps you decide whether the KMS key is appropriate for a task. The default value is an empty string (no description). \n To set or change the description after the key is created, use UpdateKeyDescription." + description: "A description of the KMS key.\n\n\nUse a description that helps you decide whether the KMS key is appropriate\nfor a task. The default value is an empty string (no description).\n\n\nTo set or change the description after the key is created, use UpdateKeyDescription." 
type: "string" enableKeyRotation: type: "boolean" keySpec: - description: "Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) in the Key Management Service Developer Guide . \n The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm), kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) in the Key Management Service Developer Guide . \n Amazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration) use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys. \n KMS supports the following key specs for KMS keys: \n * Symmetric encryption key (default) SYMMETRIC_DEFAULT \n * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512 \n * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 \n * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) \n * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. \n * SM2 key pairs (China Regions only) SM2" + description: "Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT,\ncreates a KMS key with a 256-bit AES-GCM key that is used for encryption\nand decryption, except in China Regions, where it creates a 128-bit symmetric\nkey that uses SM4 encryption. For help choosing a key spec for your KMS key,\nsee Choosing a KMS key type (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose)\nin the Key Management Service Developer Guide .\n\n\nThe KeySpec determines whether the KMS key contains a symmetric key or an\nasymmetric key pair. It also determines the cryptographic algorithms that\nthe KMS key supports. You can't change the KeySpec after the KMS key is created.\nTo further restrict the algorithms that can be used with the KMS key, use\na condition key in its key policy or IAM policy. 
For more information, see\nkms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),\nkms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)\nor kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)\nin the Key Management Service Developer Guide .\n\n\nAmazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration)\nuse symmetric encryption KMS keys to protect your data. These services do\nnot support asymmetric KMS keys or HMAC KMS keys.\n\n\nKMS supports the following key specs for KMS keys:\n\n\n * Symmetric encryption key (default) SYMMETRIC_DEFAULT\n\n\n * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512\n\n\n * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096\n\n\n * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1)\n ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1)\n\n\n * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1),\n commonly used for cryptocurrencies.\n\n\n * SM2 key pairs (China Regions only) SM2" type: "string" keyUsage: - description: "Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created. \n Select only one valid value. \n * For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. \n * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC. \n * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY. \n * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY. \n * For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY." + description: "Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)\nfor which you can use the KMS key. The default value is ENCRYPT_DECRYPT.\nThis parameter is optional when you are creating a symmetric encryption KMS\nkey; otherwise, it is required. You can't change the KeyUsage value after\nthe KMS key is created.\n\n\nSelect only one valid value.\n\n\n * For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.\n\n\n * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.\n\n\n * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT\n or SIGN_VERIFY.\n\n\n * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY.\n\n\n * For asymmetric KMS keys with SM2 key material (China Regions only),\n specify ENCRYPT_DECRYPT or SIGN_VERIFY." type: "string" multiRegion: - description: "Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key. \n For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False. 
\n This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the Key Management Service Developer Guide. \n This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation. \n You can create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. However, you cannot create a multi-Region key in a custom key store." + description: "Creates a multi-Region primary key that you can replicate into other Amazon\nWeb Services Regions. You cannot change this value after you create the KMS\nkey.\n\n\nFor a multi-Region key, set this parameter to True. For a single-Region KMS\nkey, omit this parameter or set it to False. The default value is False.\n\n\nThis operation supports multi-Region keys, an KMS feature that lets you create\nmultiple interoperable KMS keys in different Amazon Web Services Regions.\nBecause these KMS keys have the same key ID, key material, and other metadata,\nyou can use them interchangeably to encrypt data in one Amazon Web Services\nRegion and decrypt it in a different Amazon Web Services Region without re-encrypting\nthe data or making a cross-Region call. For more information about multi-Region\nkeys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)\nin the Key Management Service Developer Guide.\n\n\nThis value creates a primary key, not a replica. To create a replica key,\nuse the ReplicateKey operation.\n\n\nYou can create a multi-Region version of a symmetric encryption KMS key,\nan HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material.\nHowever, you cannot create a multi-Region key in a custom key store." type: "boolean" origin: - description: "The source of the key material for the KMS key. You cannot change the origin after you create the KMS key. The default is AWS_KMS, which means that KMS creates the key material. \n To create a KMS key with no key material (for imported key material), set the value to EXTERNAL. For more information about importing key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in the Key Management Service Developer Guide. This value is valid only for symmetric encryption KMS keys. \n To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) and create its key material in the associated CloudHSM cluster, set this value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify the custom key store. This value is valid only for symmetric encryption KMS keys." + description: "The source of the key material for the KMS key. You cannot change the origin\nafter you create the KMS key. 
The default is AWS_KMS, which means that KMS\ncreates the key material.\n\n\nTo create a KMS key with no key material (for imported key material), set\nthe value to EXTERNAL. For more information about importing key material\ninto KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)\nin the Key Management Service Developer Guide. This value is valid only for\nsymmetric encryption KMS keys.\n\n\nTo create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)\nand create its key material in the associated CloudHSM cluster, set this\nvalue to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to\nidentify the custom key store. This value is valid only for symmetric encryption\nKMS keys." type: "string" policy: - description: "The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) in the Key Management Service Developer Guide. \n If you provide a key policy, it must meet the following criteria: \n * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy must allow the principal that is making the CreateKey request to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section of the Key Management Service Developer Guide . \n * Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) in the Amazon Web Services Identity and Access Management User Guide. \n A key policy document can include only the following characters: \n * Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range. \n * Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF). \n * The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters \n For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in the Identity and Access Management User Guide ." + description: "The key policy to attach to the KMS key. If you do not specify a key policy,\nKMS attaches a default key policy to the KMS key. 
For more information, see\nDefault key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)\nin the Key Management Service Developer Guide.\n\n\nIf you provide a key policy, it must meet the following criteria:\n\n\n * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy\n must allow the principal that is making the CreateKey request to make\n a subsequent PutKeyPolicy request on the KMS key. This reduces the risk\n that the KMS key becomes unmanageable. For more information, refer to\n the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)\n section of the Key Management Service Developer Guide .\n\n\n * Each statement in the key policy must contain one or more principals.\n The principals in the key policy must exist and be visible to KMS. When\n you create a new Amazon Web Services principal (for example, an IAM user\n or role), you might need to enforce a delay before including the new principal\n in a key policy because the new principal might not be immediately visible\n to KMS. For more information, see Changes that I make are not always immediately\n visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)\n in the Amazon Web Services Identity and Access Management User Guide.\n\n\nA key policy document can include only the following characters:\n\n\n * Printable ASCII characters from the space character (\\u0020) through\n the end of the ASCII character range.\n\n\n * Printable characters in the Basic Latin and Latin-1 Supplement character\n set (through \\u00FF).\n\n\n * The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special\n characters\n\n\nFor information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)\nin the Key Management Service Developer Guide. For help writing and formatting\na JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)\nin the Identity and Access Management User Guide ." type: "string" tags: - description: "Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation. \n Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key Management Service Developer Guide. \n To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) permission in an IAM policy. \n Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one. \n When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html)." + description: "Assigns one or more tags to the KMS key. 
Use this parameter to tag the KMS\nkey when it is created. To tag an existing KMS key, use the TagResource operation.\n\n\nTagging or untagging a KMS key can allow or deny permission to the KMS key.\nFor details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)\nin the Key Management Service Developer Guide.\n\n\nTo use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)\npermission in an IAM policy.\n\n\nEach tag consists of a tag key and a tag value. Both the tag key and the\ntag value are required, but the tag value can be an empty (null) string.\nYou cannot have more than one tag on a KMS key with the same tag key. If\nyou specify an existing tag key with a different tag value, KMS replaces\nthe current tag value with the specified one.\n\n\nWhen you add tags to an Amazon Web Services resource, Amazon Web Services\ngenerates a cost allocation report with usage and costs aggregated by tags.\nTags can also be used to control access to a KMS key. For details, see Tagging\nKeys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html)." items: - description: "A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty (null) strings. \n For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) in the Amazon Web Services Billing and Cost Management User Guide." + description: "A key-value pair. A tag consists of a tag key and a tag value. Tag keys and\ntag values are both required, but tag values can be empty (null) strings.\n\n\nFor information about the rules that apply to tag keys and tag values, see\nUser-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)\nin the Amazon Web Services Billing and Cost Management User Guide." properties: tagKey: type: "string" @@ -71,13 +71,13 @@ spec: description: "KeyStatus defines the observed state of Key" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -87,15 +87,15 @@ spec: - "region" type: "object" awsAccountID: - description: "The twelve-digit account ID of the Amazon Web Services account that owns the KMS key." + description: "The twelve-digit account ID of the Amazon Web Services account that owns\nthe KMS key." type: "string" cloudHsmClusterID: - description: "The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), KMS creates the key material for the KMS key in the associated CloudHSM cluster. This value is present only when the KMS key is created in a custom key store." + description: "The cluster ID of the CloudHSM cluster that contains the key material for\nthe KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),\nKMS creates the key material for the KMS key in the associated CloudHSM cluster.\nThis value is present only when the KMS key is created in a custom key store." type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -123,36 +123,36 @@ spec: format: "date-time" type: "string" deletionDate: - description: "The date and time after which KMS deletes this KMS key. This value is present only when the KMS key is scheduled for deletion, that is, when its KeyState is PendingDeletion. \n When the primary key in a multi-Region key is scheduled for deletion but still has replica keys, its key state is PendingReplicaDeletion and the length of its waiting period is displayed in the PendingDeletionWindowInDays field." + description: "The date and time after which KMS deletes this KMS key. 
This value is present\nonly when the KMS key is scheduled for deletion, that is, when its KeyState\nis PendingDeletion.\n\n\nWhen the primary key in a multi-Region key is scheduled for deletion but\nstill has replica keys, its key state is PendingReplicaDeletion and the length\nof its waiting period is displayed in the PendingDeletionWindowInDays field." format: "date-time" type: "string" enabled: - description: "Specifies whether the KMS key is enabled. When KeyState is Enabled this value is true, otherwise it is false." + description: "Specifies whether the KMS key is enabled. When KeyState is Enabled this value\nis true, otherwise it is false." type: "boolean" encryptionAlgorithms: - description: "The encryption algorithms that the KMS key supports. You cannot use the KMS key with other encryption algorithms within KMS. \n This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT." + description: "The encryption algorithms that the KMS key supports. You cannot use the KMS\nkey with other encryption algorithms within KMS.\n\n\nThis value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT." items: type: "string" type: "array" expirationModel: - description: "Specifies whether the KMS key's key material expires. This value is present only when Origin is EXTERNAL, otherwise this value is omitted." + description: "Specifies whether the KMS key's key material expires. This value is present\nonly when Origin is EXTERNAL, otherwise this value is omitted." type: "string" keyID: description: "The globally unique identifier for the KMS key." type: "string" keyManager: - description: "The manager of the KMS key. KMS keys in your Amazon Web Services account are either customer managed or Amazon Web Services managed. For more information about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) in the Key Management Service Developer Guide." + description: "The manager of the KMS key. KMS keys in your Amazon Web Services account\nare either customer managed or Amazon Web Services managed. For more information\nabout the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys)\nin the Key Management Service Developer Guide." type: "string" keyState: - description: "The current status of the KMS key. \n For more information about how key state affects the use of a KMS key, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the Key Management Service Developer Guide." + description: "The current status of the KMS key.\n\n\nFor more information about how key state affects the use of a KMS key, see\nKey states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)\nin the Key Management Service Developer Guide." type: "string" macAlgorithms: - description: "The message authentication code (MAC) algorithm that the HMAC KMS key supports. \n This value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC." + description: "The message authentication code (MAC) algorithm that the HMAC KMS key supports.\n\n\nThis value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC." items: type: "string" type: "array" multiRegionConfiguration: - description: "Lists the primary and replica keys in same multi-Region key. This field is present only when the value of the MultiRegion field is True. 
\n For more information about any listed KMS key, use the DescribeKey operation. \n * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. \n * PrimaryKey displays the key ARN and Region of the primary key. This field displays the current KMS key if it is the primary key. \n * ReplicaKeys displays the key ARNs and Regions of all replica keys. This field includes the current KMS key if it is a replica key." + description: "Lists the primary and replica keys in same multi-Region key. This field is\npresent only when the value of the MultiRegion field is True.\n\n\nFor more information about any listed KMS key, use the DescribeKey operation.\n\n\n * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA\n key.\n\n\n * PrimaryKey displays the key ARN and Region of the primary key. This\n field displays the current KMS key if it is the primary key.\n\n\n * ReplicaKeys displays the key ARNs and Regions of all replica keys. This\n field includes the current KMS key if it is a replica key." properties: multiRegionKeyType: type: "string" @@ -176,16 +176,16 @@ spec: type: "array" type: "object" pendingDeletionWindowInDays: - description: "The waiting period before the primary key in a multi-Region key is deleted. This waiting period begins when the last of its replica keys is deleted. This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. That indicates that the KMS key is the primary key in a multi-Region key, it is scheduled for deletion, and it still has existing replica keys. \n When a single-Region KMS key or a multi-Region replica key is scheduled for deletion, its deletion date is displayed in the DeletionDate field. However, when the primary key in a multi-Region key is scheduled for deletion, its waiting period doesn't begin until all of its replica keys are deleted. This value displays that waiting period. When the last replica key in the multi-Region key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion to PendingDeletion and the deletion date appears in the DeletionDate field." + description: "The waiting period before the primary key in a multi-Region key is deleted.\nThis waiting period begins when the last of its replica keys is deleted.\nThis value is present only when the KeyState of the KMS key is PendingReplicaDeletion.\nThat indicates that the KMS key is the primary key in a multi-Region key,\nit is scheduled for deletion, and it still has existing replica keys.\n\n\nWhen a single-Region KMS key or a multi-Region replica key is scheduled for\ndeletion, its deletion date is displayed in the DeletionDate field. However,\nwhen the primary key in a multi-Region key is scheduled for deletion, its\nwaiting period doesn't begin until all of its replica keys are deleted. This\nvalue displays that waiting period. When the last replica key in the multi-Region\nkey is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion\nto PendingDeletion and the deletion date appears in the DeletionDate field." format: "int64" type: "integer" signingAlgorithms: - description: "The signing algorithms that the KMS key supports. You cannot use the KMS key with other signing algorithms within KMS. \n This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY." + description: "The signing algorithms that the KMS key supports. 
You cannot use the KMS\nkey with other signing algorithms within KMS.\n\n\nThis field appears only when the KeyUsage of the KMS key is SIGN_VERIFY." items: type: "string" type: "array" validTo: - description: "The time at which the imported key material expires. When the key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted." + description: "The time at which the imported key material expires. When the key material\nexpires, KMS deletes the key material and the KMS key becomes unusable. This\nvalue is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel\nis KEY_MATERIAL_EXPIRES, otherwise this value is omitted." format: "date-time" type: "string" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/mq-controller/mq.services.k8s.aws/v1alpha1/brokers.yaml b/crd-catalog/aws-controllers-k8s/mq-controller/mq.services.k8s.aws/v1alpha1/brokers.yaml index e9a90c791..9841b55b1 100644 --- a/crd-catalog/aws-controllers-k8s/mq-controller/mq.services.k8s.aws/v1alpha1/brokers.yaml +++ b/crd-catalog/aws-controllers-k8s/mq-controller/mq.services.k8s.aws/v1alpha1/brokers.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "brokers.mq.services.k8s.aws" spec: group: "mq.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Broker is the Schema for the Brokers API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -34,7 +34,7 @@ spec: autoMinorVersionUpgrade: type: "boolean" configuration: - description: "A list of information about the configuration. \n Does not apply to RabbitMQ brokers." + description: "A list of information about the configuration.\n\n\nDoes not apply to RabbitMQ brokers." properties: id: type: "string" @@ -47,7 +47,7 @@ spec: deploymentMode: type: "string" encryptionOptions: - description: "Does not apply to RabbitMQ brokers. \n Encryption options for the broker." + description: "Does not apply to RabbitMQ brokers.\n\n\nEncryption options for the broker." 
properties: kmsKeyID: type: "string" @@ -61,7 +61,7 @@ spec: hostInstanceType: type: "string" ldapServerMetadata: - description: "Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. \n Does not apply to RabbitMQ brokers." + description: "Optional. The metadata of the LDAP server used to authenticate and authorize\nconnections to the broker.\n\n\nDoes not apply to RabbitMQ brokers." properties: hosts: items: @@ -97,7 +97,7 @@ spec: type: "boolean" type: "object" maintenanceWindowStartTime: - description: "The scheduled time period relative to UTC during which Amazon MQ begins to apply pending updates or patches to the broker." + description: "The scheduled time period relative to UTC during which Amazon MQ begins to\napply pending updates or patches to the broker." properties: dayOfWeek: type: "string" @@ -112,10 +112,10 @@ spec: type: "boolean" securityGroupRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -134,10 +134,10 @@ spec: type: "array" subnetRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -150,7 +150,7 @@ spec: type: "object" users: items: - description: "A user associated with the broker. For RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console." + description: "A user associated with the broker. For RabbitMQ brokers, one and only one\nadministrative user is accepted and created when a broker is first provisioned.\nAll subsequent broker users are created by making RabbitMQ API calls directly\nto brokers or via the RabbitMQ web console." 
properties: consoleAccess: type: "boolean" @@ -159,7 +159,7 @@ spec: type: "string" type: "array" password: - description: "SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret" + description: "SecretKeyReference combines a k8s corev1.SecretReference with a\nspecific key within the referred-to Secret" properties: key: description: "Key is the key within the secret" @@ -192,13 +192,13 @@ spec: description: "BrokerStatus defines the observed state of Broker" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -226,9 +226,9 @@ spec: brokerState: type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
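# --- Illustrative examples (not part of the patch above) ---
# Minimal Key (kms.services.k8s.aws/v1alpha1) and Broker (mq.services.k8s.aws/v1alpha1)
# manifests sketched from the spec fields reformatted in the hunks above. Values such
# as the engine version, instance type and Secret name are placeholders; on the Broker,
# name, engineType, engineVersion and users[].username are assumed from the wider
# schema rather than shown in these hunks.
apiVersion: kms.services.k8s.aws/v1alpha1
kind: Key
metadata:
  name: example-key
spec:
  description: "Symmetric encryption key managed through ACK"
  keySpec: "SYMMETRIC_DEFAULT"     # default: symmetric encryption key
  keyUsage: "ENCRYPT_DECRYPT"      # default for symmetric encryption keys
  multiRegion: false               # single-Region key; cannot be changed later
  enableKeyRotation: true
  tags:
    - tagKey: "environment"
      tagValue: "staging"
---
apiVersion: mq.services.k8s.aws/v1alpha1
kind: Broker
metadata:
  name: example-broker
spec:
  name: example-broker             # assumed field (not in the hunks above)
  engineType: "RabbitMQ"           # assumed field; RabbitMQ brokers ignore configuration/encryptionOptions/ldapServerMetadata
  engineVersion: "3.11.20"         # assumed field; placeholder version
  deploymentMode: "SINGLE_INSTANCE"
  hostInstanceType: "mq.t3.micro"
  autoMinorVersionUpgrade: true
  users:
    - username: "admin"            # assumed field
      consoleAccess: true
      password:                    # SecretKeyReference: password read from a Kubernetes Secret
        name: "example-broker-admin"
        key: "password"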
diff --git a/crd-catalog/aws-controllers-k8s/opensearchservice-controller/opensearchservice.services.k8s.aws/v1alpha1/domains.yaml b/crd-catalog/aws-controllers-k8s/opensearchservice-controller/opensearchservice.services.k8s.aws/v1alpha1/domains.yaml index 47bfbb204..1102be3a3 100644 --- a/crd-catalog/aws-controllers-k8s/opensearchservice-controller/opensearchservice.services.k8s.aws/v1alpha1/domains.yaml +++ b/crd-catalog/aws-controllers-k8s/opensearchservice-controller/opensearchservice.services.k8s.aws/v1alpha1/domains.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "domains.opensearchservice.services.k8s.aws" spec: group: "opensearchservice.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Domain is the Schema for the Domains API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -35,7 +35,7 @@ spec: advancedOptions: additionalProperties: type: "string" - description: "Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Advanced cluster parameters (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomain-configure-advanced-options) for more information." + description: "Option to allow references to indices in an HTTP request body. Must be false\nwhen configuring access to individual sub-resources. By default, the value\nis true. See Advanced cluster parameters (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomain-configure-advanced-options)\nfor more information." type: "object" advancedSecurityOptions: description: "Specifies advanced security options." @@ -50,12 +50,12 @@ spec: description: "Credentials for the master user: username and password, ARN, or both." properties: masterUserARN: - description: "The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS Identity and Access Management for more information." 
+ description: "The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities\n(http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS\nIdentity and Access Management for more information." type: "string" masterUserName: type: "string" masterUserPassword: - description: "SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret" + description: "SecretKeyReference combines a k8s corev1.SecretReference with a\nspecific key within the referred-to Secret" properties: key: description: "Key is the key within the secret" @@ -105,18 +105,18 @@ spec: type: "string" maintenanceSchedules: items: - description: "Specifies the Auto-Tune maintenance schedule. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information." + description: "Specifies the Auto-Tune maintenance schedule. See Auto-Tune for Amazon OpenSearch\nService (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)\nfor more information." properties: cronExpressionForRecurrence: type: "string" duration: - description: "The maintenance schedule duration: duration value and duration unit. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information." + description: "The maintenance schedule duration: duration value and duration unit. See\nAuto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)\nfor more information." properties: unit: - description: "The unit of a maintenance schedule duration. Valid value is HOUR. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information." + description: "The unit of a maintenance schedule duration. Valid value is HOUR. See Auto-Tune\nfor Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)\nfor more information." type: "string" value: - description: "Integer to specify the value of a maintenance schedule duration. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information." + description: "Integer to specify the value of a maintenance schedule duration. See Auto-Tune\nfor Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)\nfor more information." format: "int64" type: "integer" type: "object" @@ -127,7 +127,7 @@ spec: type: "array" type: "object" clusterConfig: - description: "Configuration options for a domain. Specifies the instance type and number of instances in the domain." + description: "Configuration options for a domain. Specifies the instance type and number\nof instances in the domain." properties: coldStorageOptions: description: "Specifies the configuration for cold storage options such as enabled" @@ -155,7 +155,7 @@ spec: warmType: type: "string" zoneAwarenessConfig: - description: "The zone awareness configuration for the domain cluster, such as the number of availability zones." + description: "The zone awareness configuration for the domain cluster, such as the number\nof availability zones." 
properties: availabilityZoneCount: format: "int64" @@ -165,7 +165,7 @@ spec: type: "boolean" type: "object" cognitoOptions: - description: "Options to specify the Cognito user and identity pools for OpenSearch Dashboards authentication. For more information, see Configuring Amazon Cognito authentication for OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html)." + description: "Options to specify the Cognito user and identity pools for OpenSearch Dashboards\nauthentication. For more information, see Configuring Amazon Cognito authentication\nfor OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html)." properties: enabled: type: "boolean" @@ -182,7 +182,7 @@ spec: customEndpoint: type: "string" customEndpointCertificateARN: - description: "The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS Identity and Access Management for more information." + description: "The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities\n(http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS\nIdentity and Access Management for more information." type: "string" customEndpointEnabled: type: "boolean" @@ -192,7 +192,7 @@ spec: type: "string" type: "object" ebsOptions: - description: "Options to enable, disable, and specify the type and size of EBS storage volumes." + description: "Options to enable, disable, and specify the type and size of EBS storage\nvolumes." properties: ebsEnabled: type: "boolean" @@ -206,7 +206,7 @@ spec: format: "int64" type: "integer" volumeType: - description: "The type of EBS volume, standard, gp2, gp3 or io1. See Configuring EBS-based Storage (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/opensearch-createupdatedomains.html#opensearch-createdomain-configure-ebs) for more information." + description: "The type of EBS volume, standard, gp2, gp3 or io1. See Configuring EBS-based\nStorage (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/opensearch-createupdatedomains.html#opensearch-createdomain-configure-ebs)\nfor more information." type: "string" type: "object" encryptionAtRestOptions: @@ -218,11 +218,11 @@ spec: type: "string" type: "object" engineVersion: - description: "String of format Elasticsearch_X.Y or OpenSearch_X.Y to specify the engine version for the Amazon OpenSearch Service domain. For example, \"OpenSearch_1.0\" or \"Elasticsearch_7.9\". For more information, see Creating and managing Amazon OpenSearch Service domains (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains)." + description: "String of format Elasticsearch_X.Y or OpenSearch_X.Y to specify the engine\nversion for the Amazon OpenSearch Service domain. For example, \"OpenSearch_1.0\"\nor \"Elasticsearch_7.9\". For more information, see Creating and managing Amazon\nOpenSearch Service domains (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains)." type: "string" logPublishingOptions: additionalProperties: - description: "Log Publishing option that is set for a given domain. Attributes and their details: * CloudWatchLogsLogGroupArn: ARN of the Cloudwatch log group to publish logs to. \n * Enabled: Whether the log publishing for a given log type is enabled or not." 
+ description: "Log Publishing option that is set for a given domain. Attributes and their\ndetails:\n * CloudWatchLogsLogGroupArn: ARN of the Cloudwatch log group to publish\n logs to.\n\n\n * Enabled: Whether the log publishing for a given log type is enabled\n or not." properties: cloudWatchLogsLogGroupARN: description: "ARN of the Cloudwatch log group to publish logs to." @@ -230,10 +230,10 @@ spec: enabled: type: "boolean" type: "object" - description: "Map of LogType and LogPublishingOption, each containing options to publish a given type of OpenSearch log." + description: "Map of LogType and LogPublishingOption, each containing options to publish\na given type of OpenSearch log." type: "object" name: - description: "The name of the Amazon OpenSearch Service domain you're creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a lowercase letter and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen)." + description: "The name of the Amazon OpenSearch Service domain you're creating. Domain\nnames are unique across the domains owned by an account within an AWS region.\nDomain names must start with a lowercase letter and can contain the following\ncharacters: a-z (lowercase), 0-9, and - (hyphen)." type: "string" nodeToNodeEncryptionOptions: description: "Node-to-node encryption options." @@ -247,15 +247,15 @@ spec: description: "A key value pair for a resource tag." properties: key: - description: "A string of length from 1 to 128 characters that specifies the key for a tag. Tag keys must be unique for the domain to which they're attached." + description: "A string of length from 1 to 128 characters that specifies the key for a\ntag. Tag keys must be unique for the domain to which they're attached." type: "string" value: - description: "A string of length from 0 to 256 characters that specifies the value for a tag. Tag values can be null and don't have to be unique in a tag set." + description: "A string of length from 0 to 256 characters that specifies the value for\na tag. Tag values can be null and don't have to be unique in a tag set." type: "string" type: "object" type: "array" vpcOptions: - description: "Options to specify the subnets and security groups for a VPC endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html)." + description: "Options to specify the subnets and security groups for a VPC endpoint. For\nmore information, see Launching your Amazon OpenSearch Service domains using\na VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html)." properties: securityGroupIDs: items: @@ -273,13 +273,13 @@ spec: description: "DomainStatus defines the observed state of Domain" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -297,9 +297,9 @@ spec: type: "string" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -323,10 +323,10 @@ spec: type: "object" type: "array" created: - description: "The domain creation status. True if the creation of a domain is complete. False if domain creation is still in progress." + description: "The domain creation status. True if the creation of a domain is complete.\nFalse if domain creation is still in progress." type: "boolean" deleted: - description: "The domain deletion status. True if a delete request has been received for the domain but resource cleanup is still in progress. False if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned." + description: "The domain deletion status. True if a delete request has been received for\nthe domain but resource cleanup is still in progress. False if the domain\nhas not been deleted. Once domain deletion is complete, the status of the\ndomain is no longer returned." type: "boolean" domainID: description: "The unique identifier for the specified domain." 
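The descriptions reflowed above all belong to the spec and status of this OpenSearch Service Domain CRD; read together, the spec fields compose roughly as in the sketch below. This is illustrative only: the apiVersion group/version, the subnetIDs spelling, and every concrete value (names, IDs, ARN, log type) are assumptions, not taken from this patch.

apiVersion: opensearchservice.services.k8s.aws/v1alpha1   # assumed group/version for this ACK Domain CRD
kind: Domain
metadata:
  name: example-domain
spec:
  name: example-domain                # must start with a lowercase letter; a-z, 0-9, '-' only
  engineVersion: "OpenSearch_1.0"     # Elasticsearch_X.Y or OpenSearch_X.Y
  clusterConfig:
    zoneAwarenessConfig:
      availabilityZoneCount: 3        # number of Availability Zones for the domain cluster
  ebsOptions:
    ebsEnabled: true
    volumeType: "gp3"                 # standard, gp2, gp3 or io1
  vpcOptions:
    securityGroupIDs:
      - "sg-0123456789abcdef0"        # hypothetical security group ID
    subnetIDs:                        # assumed spelling, mirroring securityGroupIDs
      - "subnet-0123456789abcdef0"
  logPublishingOptions:
    SEARCH_SLOW_LOGS:                 # map key is the OpenSearch log type
      cloudWatchLogsLogGroupARN: "arn:aws:logs:us-east-1:111122223333:log-group:example-domain-logs"   # hypothetical ARN
      enabled: true
  tags:
    - key: "team"                     # 1 to 128 characters, unique per domain
      value: "search"                 # 0 to 256 characters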
@@ -337,10 +337,10 @@ spec: endpoints: additionalProperties: type: "string" - description: "Map containing the domain endpoints used to submit index and search requests. Example key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'." + description: "Map containing the domain endpoints used to submit index and search requests.\nExample key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'." type: "object" processing: - description: "The status of the domain configuration. True if Amazon OpenSearch Service is processing configuration changes. False if the configuration is active." + description: "The status of the domain configuration. True if Amazon OpenSearch Service\nis processing configuration changes. False if the configuration is active." type: "boolean" serviceSoftwareOptions: description: "The current status of the domain's service software." @@ -371,7 +371,7 @@ spec: type: "integer" type: "object" upgradeProcessing: - description: "The status of a domain version upgrade. True if Amazon OpenSearch Service is undergoing a version upgrade. False if the configuration is active." + description: "The status of a domain version upgrade. True if Amazon OpenSearch Service\nis undergoing a version upgrade. False if the configuration is active." type: "boolean" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/alertmanagerdefinitions.yaml b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/alertmanagerdefinitions.yaml index 3b5a68216..f0c550f5f 100644 --- a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/alertmanagerdefinitions.yaml +++ b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/alertmanagerdefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "alertmanagerdefinitions.prometheusservice.services.k8s.aws" spec: group: "prometheusservice.services.k8s.aws" @@ -23,10 +23,10 @@ spec: description: "AlertManagerDefinition is the Schema for the AlertManagerDefinitions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -39,10 +39,10 @@ spec: description: "The ID of the workspace in which to create the alert manager definition." type: "string" workspaceRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -55,13 +55,13 @@ spec: description: "AlertManagerDefinitionStatus defines the observed state of AlertManagerDefinition" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
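The "from: name:" syntax spelled out by the AWSResourceReferenceWrapper description above reads as follows in an AlertManagerDefinition manifest; this is a minimal sketch in which the referenced Workspace name is hypothetical and the alertmanager configuration payload is omitted.

apiVersion: prometheusservice.services.k8s.aws/v1alpha1
kind: AlertManagerDefinition
metadata:
  name: example-alertmanager-definition
spec:
  workspaceRef:                 # AWSResourceReferenceWrapper: resolve the workspace ID from another ACK resource
    from:
      name: example-workspace   # hypothetical Workspace CR name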
@@ -71,9 +71,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/rulegroupsnamespaces.yaml b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/rulegroupsnamespaces.yaml index 7a0d74b58..426107e70 100644 --- a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/rulegroupsnamespaces.yaml +++ b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/rulegroupsnamespaces.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "rulegroupsnamespaces.prometheusservice.services.k8s.aws" spec: group: "prometheusservice.services.k8s.aws" @@ -25,10 +25,10 @@ spec: description: "RuleGroupsNamespace is the Schema for the RuleGroupsNamespaces API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -49,10 +49,10 @@ spec: description: "The ID of the workspace in which to create the rule group namespace." 
type: "string" workspaceRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -66,13 +66,13 @@ spec: description: "RuleGroupsNamespaceStatus defines the observed state of RuleGroupsNamespace" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -82,9 +82,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/workspaces.yaml b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/workspaces.yaml index 04d4ed8bf..46c16879e 100644 --- a/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/workspaces.yaml +++ b/crd-catalog/aws-controllers-k8s/prometheusservice-controller/prometheusservice.services.k8s.aws/v1alpha1/workspaces.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "workspaces.prometheusservice.services.k8s.aws" spec: group: "prometheusservice.services.k8s.aws" @@ -23,10 +23,10 @@ spec: description: "Workspace is the Schema for the Workspaces API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -34,7 +34,7 @@ spec: description: "WorkspaceSpec defines the desired state of Workspace." properties: alias: - description: "An optional user-assigned alias for this workspace. This alias is for user reference and does not need to be unique." 
+ description: "An optional user-assigned alias for this workspace. This alias is for user\nreference and does not need to be unique." type: "string" tags: additionalProperties: @@ -46,13 +46,13 @@ spec: description: "WorkspaceStatus defines the observed state of Workspace" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -62,9 +62,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusterparametergroups.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusterparametergroups.yaml index 989d3bfd8..15e26f112 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusterparametergroups.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusterparametergroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbclusterparametergroups.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,33 +19,33 @@ spec: description: "DBClusterParameterGroup is the Schema for the DBClusterParameterGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBClusterParameterGroupSpec defines the desired state of DBClusterParameterGroup. \n Contains the details of an Amazon RDS DB cluster parameter group. \n This data type is used as a response element in the DescribeDBClusterParameterGroups action." + description: "DBClusterParameterGroupSpec defines the desired state of DBClusterParameterGroup.\n\n\nContains the details of an Amazon RDS DB cluster parameter group.\n\n\nThis data type is used as a response element in the DescribeDBClusterParameterGroups\naction." properties: description: description: "The description for the DB cluster parameter group." type: "string" family: - description: "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family. 
\n Aurora MySQL \n Example: aurora5.6, aurora-mysql5.7, aurora-mysql8.0 \n Aurora PostgreSQL \n Example: aurora-postgresql9.6 \n RDS for MySQL \n Example: mysql8.0 \n RDS for PostgreSQL \n Example: postgres12 \n To list all of the available parameter group families for a DB engine, use the following command: \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine \n For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command: \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql \n The output contains duplicates. \n The following are the valid DB engine values: \n * aurora (for MySQL 5.6-compatible Aurora) \n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) \n * aurora-postgresql \n * mysql \n * postgres" + description: "The DB cluster parameter group family name. A DB cluster parameter group\ncan be associated with one and only one DB cluster parameter group family,\nand can be applied only to a DB cluster running a database engine and engine\nversion compatible with that DB cluster parameter group family.\n\n\nAurora MySQL\n\n\nExample: aurora5.6, aurora-mysql5.7, aurora-mysql8.0\n\n\nAurora PostgreSQL\n\n\nExample: aurora-postgresql9.6\n\n\nRDS for MySQL\n\n\nExample: mysql8.0\n\n\nRDS for PostgreSQL\n\n\nExample: postgres12\n\n\nTo list all of the available parameter group families for a DB engine, use\nthe following command:\n\n\naws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"\n--engine \n\n\nFor example, to list all of the available parameter group families for the\nAurora PostgreSQL DB engine, use the following command:\n\n\naws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"\n--engine aurora-postgresql\n\n\nThe output contains duplicates.\n\n\nThe following are the valid DB engine values:\n\n\n * aurora (for MySQL 5.6-compatible Aurora)\n\n\n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)\n\n\n * aurora-postgresql\n\n\n * mysql\n\n\n * postgres" type: "string" name: - description: "The name of the DB cluster parameter group. \n Constraints: \n * Must not match the name of an existing DB cluster parameter group. \n This value is stored as a lowercase string." + description: "The name of the DB cluster parameter group.\n\n\nConstraints:\n\n\n * Must not match the name of an existing DB cluster parameter group.\n\n\nThis value is stored as a lowercase string." type: "string" parameterOverrides: additionalProperties: type: "string" type: "object" parameters: - description: "A list of parameters in the DB cluster parameter group to modify. \n Valid Values (for the application method): immediate | pending-reboot \n You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters. \n When the application method is immediate, changes to dynamic parameters are applied immediately to the DB clusters associated with the parameter group. When the application method is pending-reboot, changes to dynamic and static parameters are applied after a reboot without failover to the DB clusters associated with the parameter group." 
+ description: "A list of parameters in the DB cluster parameter group to modify.\n\n\nValid Values (for the application method): immediate | pending-reboot\n\n\nYou can use the immediate value with dynamic parameters only. You can use\nthe pending-reboot value for both dynamic and static parameters.\n\n\nWhen the application method is immediate, changes to dynamic parameters are\napplied immediately to the DB clusters associated with the parameter group.\nWhen the application method is pending-reboot, changes to dynamic and static\nparameters are applied after a reboot without failover to the DB clusters\nassociated with the parameter group." items: - description: "This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. \n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions." + description: "This data type is used as a request parameter in the ModifyDBParameterGroup\nand ResetDBParameterGroup actions.\n\n\nThis data type is used as a response element in the DescribeEngineDefaultParameters\nand DescribeDBParameters actions." properties: allowedValues: type: "string" @@ -76,7 +76,7 @@ spec: tags: description: "Tags to assign to the DB cluster parameter group." items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -93,13 +93,13 @@ spec: description: "DBClusterParameterGroupStatus defines the observed state of DBClusterParameterGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -109,9 +109,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -137,7 +137,7 @@ spec: parameterOverrideStatuses: description: "Provides a list of parameters for the DB cluster parameter group." items: - description: "This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. \n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions." + description: "This data type is used as a request parameter in the ModifyDBParameterGroup\nand ResetDBParameterGroup actions.\n\n\nThis data type is used as a response element in the DescribeEngineDefaultParameters\nand DescribeDBParameters actions." properties: allowedValues: type: "string" diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusters.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusters.yaml index e3bbfb2af..571956038 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusters.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbclusters.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbclusters.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,72 +19,72 @@ spec: description: "DBCluster is the Schema for the DBClusters API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBClusterSpec defines the desired state of DBCluster. \n Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster. \n For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster. \n For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime. \n For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) in the Amazon Aurora User Guide. \n For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the Amazon RDS User Guide." 
+ description: "DBClusterSpec defines the desired state of DBCluster.\n\n\nContains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.\n\n\nFor an Amazon Aurora DB cluster, this data type is used as a response element\nin the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster,\nModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot,\nRestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.\n\n\nFor a Multi-AZ DB cluster, this data type is used as a response element in\nthe operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster,\nModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.\n\n\nFor more information on Amazon Aurora DB clusters, see What is Amazon Aurora?\n(https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html)\nin the Amazon Aurora User Guide.\n\n\nFor more information on Multi-AZ DB clusters, see Multi-AZ deployments with\ntwo readable standby DB instances (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html)\nin the Amazon RDS User Guide." properties: allocatedStorage: - description: "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. \n This setting is required to create a Multi-AZ DB cluster. \n Valid for: Multi-AZ DB clusters only" + description: "The amount of storage in gibibytes (GiB) to allocate to each DB instance\nin the Multi-AZ DB cluster.\n\n\nThis setting is required to create a Multi-AZ DB cluster.\n\n\nValid for: Multi-AZ DB clusters only" format: "int64" type: "integer" autoMinorVersionUpgrade: - description: "A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. \n Valid for: Multi-AZ DB clusters only" + description: "A value that indicates whether minor engine upgrades are applied automatically\nto the DB cluster during the maintenance window. By default, minor engine\nupgrades are applied automatically.\n\n\nValid for: Multi-AZ DB clusters only" type: "boolean" availabilityZones: - description: "A list of Availability Zones (AZs) where DB instances in the DB cluster can be created. \n For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters only" + description: "A list of Availability Zones (AZs) where DB instances in the DB cluster can\nbe created.\n\n\nFor information on Amazon Web Services Regions and Availability Zones, see\nChoosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters only" items: type: "string" type: "array" backtrackWindow: - description: "The target backtrack window, in seconds. To disable backtracking, set this value to 0. \n Default: 0 \n Constraints: \n * If specified, this value must be set to a number from 0 to 259,200 (72 hours). \n Valid for: Aurora MySQL DB clusters only" + description: "The target backtrack window, in seconds. 
To disable backtracking, set this\nvalue to 0.\n\n\nDefault: 0\n\n\nConstraints:\n\n\n * If specified, this value must be set to a number from 0 to 259,200 (72\n hours).\n\n\nValid for: Aurora MySQL DB clusters only" format: "int64" type: "integer" backupRetentionPeriod: - description: "The number of days for which automated backups are retained. \n Default: 1 \n Constraints: \n * Must be a value from 1 to 35 \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The number of days for which automated backups are retained.\n\n\nDefault: 1\n\n\nConstraints:\n\n\n * Must be a value from 1 to 35\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" format: "int64" type: "integer" characterSetName: - description: "A value that indicates that the DB cluster should be associated with the specified CharacterSet. \n Valid for: Aurora DB clusters only" + description: "A value that indicates that the DB cluster should be associated with the\nspecified CharacterSet.\n\n\nValid for: Aurora DB clusters only" type: "string" copyTagsToSnapshot: - description: "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A value that indicates whether to copy all tags from the DB cluster to snapshots\nof the DB cluster. The default is not to copy them.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "boolean" databaseName: - description: "The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The name for your database of up to 64 alphanumeric characters. If you do\nnot provide a name, Amazon RDS doesn't create a database in the DB cluster\nyou are creating.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" dbClusterIdentifier: - description: "The DB cluster identifier. This parameter is stored as a lowercase string. \n Constraints: \n * Must contain from 1 to 63 letters, numbers, or hyphens. \n * First character must be a letter. \n * Can't end with a hyphen or contain two consecutive hyphens. \n Example: my-cluster1 \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The DB cluster identifier. This parameter is stored as a lowercase string.\n\n\nConstraints:\n\n\n * Must contain from 1 to 63 letters, numbers, or hyphens.\n\n\n * First character must be a letter.\n\n\n * Can't end with a hyphen or contain two consecutive hyphens.\n\n\nExample: my-cluster1\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" dbClusterInstanceClass: - description: "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. \n For the full list of DB instance classes and availability for your engine, see DB instance class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. \n This setting is required to create a Multi-AZ DB cluster. \n Valid for: Multi-AZ DB clusters only" + description: "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster,\nfor example db.m6gd.xlarge. 
Not all DB instance classes are available in\nall Amazon Web Services Regions, or for all database engines.\n\n\nFor the full list of DB instance classes and availability for your engine,\nsee DB instance class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html)\nin the Amazon RDS User Guide.\n\n\nThis setting is required to create a Multi-AZ DB cluster.\n\n\nValid for: Multi-AZ DB clusters only" type: "string" dbClusterParameterGroupName: - description: "The name of the DB cluster parameter group to associate with this DB cluster. If you do not specify a value, then the default DB cluster parameter group for the specified DB engine and version is used. \n Constraints: \n * If supplied, must match the name of an existing DB cluster parameter group. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The name of the DB cluster parameter group to associate with this DB cluster.\nIf you do not specify a value, then the default DB cluster parameter group\nfor the specified DB engine and version is used.\n\n\nConstraints:\n\n\n * If supplied, must match the name of an existing DB cluster parameter\n group.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" dbClusterParameterGroupRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" dbSubnetGroupName: - description: "A DB subnet group to associate with this DB cluster. \n This setting is required to create a Multi-AZ DB cluster. \n Constraints: Must match the name of an existing DBSubnetGroup. Must not be default. \n Example: mydbsubnetgroup \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A DB subnet group to associate with this DB cluster.\n\n\nThis setting is required to create a Multi-AZ DB cluster.\n\n\nConstraints: Must match the name of an existing DBSubnetGroup. 
Must not be\ndefault.\n\n\nExample: mydbsubnetgroup\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" dbSubnetGroupRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -94,68 +94,68 @@ spec: description: "Reserved for future use." type: "string" deletionProtection: - description: "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A value that indicates whether the DB cluster has deletion protection enabled.\nThe database can't be deleted when deletion protection is enabled. By default,\ndeletion protection isn't enabled.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "boolean" destinationRegion: description: "DestinationRegion is used for presigning the request to a given region." type: "string" domain: - description: "The Active Directory directory ID to create the DB cluster in. \n For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster. \n For more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters only" + description: "The Active Directory directory ID to create the DB cluster in.\n\n\nFor Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication\nto authenticate users that connect to the DB cluster.\n\n\nFor more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters only" type: "string" domainIAMRoleName: - description: "Specify the name of the IAM role to be used when making API calls to the Directory Service. \n Valid for: Aurora DB clusters only" + description: "Specify the name of the IAM role to be used when making API calls to the\nDirectory Service.\n\n\nValid for: Aurora DB clusters only" type: "string" enableCloudwatchLogsExports: - description: "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. \n RDS for MySQL \n Possible values are error, general, and slowquery. \n RDS for PostgreSQL \n Possible values are postgresql and upgrade. \n Aurora MySQL \n Possible values are audit, error, general, and slowquery. \n Aurora PostgreSQL \n Possible value is postgresql. 
\n For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon RDS User Guide. \n For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The list of log types that need to be enabled for exporting to CloudWatch\nLogs. The values in the list depend on the DB engine being used.\n\n\nRDS for MySQL\n\n\nPossible values are error, general, and slowquery.\n\n\nRDS for PostgreSQL\n\n\nPossible values are postgresql and upgrade.\n\n\nAurora MySQL\n\n\nPossible values are audit, error, general, and slowquery.\n\n\nAurora PostgreSQL\n\n\nPossible value is postgresql.\n\n\nFor more information about exporting CloudWatch Logs for Amazon RDS, see\nPublishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch)\nin the Amazon RDS User Guide.\n\n\nFor more information about exporting CloudWatch Logs for Amazon Aurora, see\nPublishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" items: type: "string" type: "array" enableGlobalWriteForwarding: - description: "A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. \n You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then. \n Valid for: Aurora DB clusters only" + description: "A value that indicates whether to enable this DB cluster to forward write\noperations to the primary cluster of an Aurora global database (GlobalCluster).\nBy default, write operations are not allowed on Aurora DB clusters that are\nsecondary clusters in an Aurora global database.\n\n\nYou can set this value only on Aurora DB clusters that are members of an\nAurora global database. With this parameter enabled, a secondary cluster\ncan forward writes to the current primary cluster and the resulting changes\nare replicated back to this cluster. For the primary DB cluster of an Aurora\nglobal database, this value is used immediately if the primary is demoted\nby the FailoverGlobalCluster API operation, but it does nothing until then.\n\n\nValid for: Aurora DB clusters only" type: "boolean" enableHTTPEndpoint: - description: "A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. 
\n When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor. \n For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters only" + description: "A value that indicates whether to enable the HTTP endpoint for an Aurora\nServerless v1 DB cluster. By default, the HTTP endpoint is disabled.\n\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API\nfor running SQL queries on the Aurora Serverless v1 DB cluster. You can also\nquery your database from inside the RDS console with the query editor.\n\n\nFor more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters only" type: "boolean" enableIAMDatabaseAuthentication: - description: "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. \n For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters only" + description: "A value that indicates whether to enable mapping of Amazon Web Services Identity\nand Access Management (IAM) accounts to database accounts. By default, mapping\nisn't enabled.\n\n\nFor more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters only" type: "boolean" enablePerformanceInsights: - description: "A value that indicates whether to turn on Performance Insights for the DB cluster. \n For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the Amazon RDS User Guide. \n Valid for: Multi-AZ DB clusters only" + description: "A value that indicates whether to turn on Performance Insights for the DB\ncluster.\n\n\nFor more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html)\nin the Amazon RDS User Guide.\n\n\nValid for: Multi-AZ DB clusters only" type: "boolean" engine: - description: "The name of the database engine to be used for this DB cluster. \n Valid Values: \n * aurora-mysql \n * aurora-postgresql \n * mysql \n * postgres \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The name of the database engine to be used for this DB cluster.\n\n\nValid Values:\n\n\n * aurora-mysql\n\n\n * aurora-postgresql\n\n\n * mysql\n\n\n * postgres\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" engineMode: - description: "The DB engine mode of the DB cluster, either provisioned or serverless. \n The serverless engine mode only applies for Aurora Serverless v1 DB clusters. \n Limitations and requirements apply to some DB engine modes. 
For more information, see the following sections in the Amazon Aurora User Guide: \n * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) \n * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) \n * Limitations of parallel query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) \n * Limitations of Aurora global databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) \n Valid for: Aurora DB clusters only" + description: "The DB engine mode of the DB cluster, either provisioned or serverless.\n\n\nThe serverless engine mode only applies for Aurora Serverless v1 DB clusters.\n\n\nLimitations and requirements apply to some DB engine modes. For more information,\nsee the following sections in the Amazon Aurora User Guide:\n\n\n * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n\n\n * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\n\n * Limitations of parallel query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations)\n\n\n * Limitations of Aurora global databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations)\n\n\nValid for: Aurora DB clusters only" type: "string" engineVersion: - description: "The version number of the database engine to use. \n To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command: \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\" \n You can supply either 5.7 or 8.0 to use the default engine version for Aurora MySQL version 2 or version 3, respectively. \n To list all of the available engine versions for Aurora PostgreSQL, use the following command: \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\" \n To list all of the available engine versions for RDS for MySQL, use the following command: \n aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\" \n To list all of the available engine versions for RDS for PostgreSQL, use the following command: \n aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\" \n Aurora MySQL \n For information, see Database engine updates for Amazon Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the Amazon Aurora User Guide. \n Aurora PostgreSQL \n For information, see Amazon Aurora PostgreSQL releases and engine versions (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the Amazon Aurora User Guide. \n MySQL \n For information, see Amazon RDS for MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the Amazon RDS User Guide. 
\n PostgreSQL \n For information, see Amazon RDS for PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the Amazon RDS User Guide. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The version number of the database engine to use.\n\n\nTo list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible)\nand version 3 (MySQL 8.0-compatible), use the following command:\n\n\naws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n\n\nYou can supply either 5.7 or 8.0 to use the default engine version for Aurora\nMySQL version 2 or version 3, respectively.\n\n\nTo list all of the available engine versions for Aurora PostgreSQL, use the\nfollowing command:\n\n\naws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n\n\nTo list all of the available engine versions for RDS for MySQL, use the following\ncommand:\n\n\naws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"\n\n\nTo list all of the available engine versions for RDS for PostgreSQL, use\nthe following command:\n\n\naws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"\n\n\nAurora MySQL\n\n\nFor information, see Database engine updates for Amazon Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html)\nin the Amazon Aurora User Guide.\n\n\nAurora PostgreSQL\n\n\nFor information, see Amazon Aurora PostgreSQL releases and engine versions\n(https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html)\nin the Amazon Aurora User Guide.\n\n\nMySQL\n\n\nFor information, see Amazon RDS for MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt)\nin the Amazon RDS User Guide.\n\n\nPostgreSQL\n\n\nFor information, see Amazon RDS for PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts)\nin the Amazon RDS User Guide.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" globalClusterIdentifier: - description: "The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster. \n Valid for: Aurora DB clusters only" + description: "The global cluster ID of an Aurora cluster that becomes the primary cluster\nin the new global database cluster.\n\n\nValid for: Aurora DB clusters only" type: "string" iops: - description: "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. \n For information about valid IOPS values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the Amazon RDS User Guide. \n This setting is required to create a Multi-AZ DB cluster. \n Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster. 
\n Valid for: Multi-AZ DB clusters only" + description: "The amount of Provisioned IOPS (input/output operations per second) to be\ninitially allocated for each DB instance in the Multi-AZ DB cluster.\n\n\nFor information about valid IOPS values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS)\nin the Amazon RDS User Guide.\n\n\nThis setting is required to create a Multi-AZ DB cluster.\n\n\nConstraints: Must be a multiple between .5 and 50 of the storage amount for\nthe DB cluster.\n\n\nValid for: Multi-AZ DB clusters only" format: "int64" type: "integer" kmsKeyID: - description: "The Amazon Web Services KMS key identifier for an encrypted DB cluster. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. \n When a KMS key isn't specified in KmsKeyId: \n * If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key. \n * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS will use your default KMS key. \n There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. \n If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The Amazon Web Services KMS key identifier for an encrypted DB cluster.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key. To use a KMS key in a different Amazon\nWeb Services account, specify the key ARN or alias ARN.\n\n\nWhen a KMS key isn't specified in KmsKeyId:\n\n\n * If ReplicationSourceIdentifier identifies an encrypted source, then\n Amazon RDS will use the KMS key used to encrypt the source. Otherwise,\n Amazon RDS will use your default KMS key.\n\n\n * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier\n isn't specified, then Amazon RDS will use your default KMS key.\n\n\nThere is a default KMS key for your Amazon Web Services account. Your Amazon\nWeb Services account has a different default KMS key for each Amazon Web\nServices Region.\n\n\nIf you create a read replica of an encrypted DB cluster in another Amazon\nWeb Services Region, you must set KmsKeyId to a KMS key identifier that is\nvalid in the destination Amazon Web Services Region. 
This KMS key is used\nto encrypt the read replica in that Amazon Web Services Region.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" kmsKeyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" manageMasterUserPassword: - description: "A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. \n For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide. \n Constraints: \n * Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A value that indicates whether to manage the master user password with Amazon\nWeb Services Secrets Manager.\n\n\nFor more information, see Password management with Amazon Web Services Secrets\nManager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html)\nin the Amazon RDS User Guide and Password management with Amazon Web Services\nSecrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html)\nin the Amazon Aurora User Guide.\n\n\nConstraints:\n\n\n * Can't manage the master user password with Amazon Web Services Secrets\n Manager if MasterUserPassword is specified.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "boolean" masterUserPassword: - description: "The password for the master database user. This password can contain any printable ASCII character except \"/\", \"\"\", or \"@\". \n Constraints: \n * Must contain from 8 to 41 characters. \n * Can't be specified if ManageMasterUserPassword is turned on. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The password for the master database user. This password can contain any\nprintable ASCII character except \"/\", \"\"\", or \"@\".\n\n\nConstraints:\n\n\n * Must contain from 8 to 41 characters.\n\n\n * Can't be specified if ManageMasterUserPassword is turned on.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" properties: key: description: "Key is the key within the secret" @@ -171,62 +171,62 @@ spec: type: "object" x-kubernetes-map-type: "atomic" masterUserSecretKMSKeyID: - description: "The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. 
\n This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. \n If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. \n There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The Amazon Web Services KMS key identifier to encrypt a secret that is automatically\ngenerated and managed in Amazon Web Services Secrets Manager.\n\n\nThis setting is valid only if the master user password is managed by RDS\nin Amazon Web Services Secrets Manager for the DB cluster.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key. To use a KMS key in a different Amazon\nWeb Services account, specify the key ARN or alias ARN.\n\n\nIf you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager\nKMS key is used to encrypt the secret. If the secret is in a different Amazon\nWeb Services account, then you can't use the aws/secretsmanager KMS key to\nencrypt the secret, and you must use a customer managed KMS key.\n\n\nThere is a default KMS key for your Amazon Web Services account. Your Amazon\nWeb Services account has a different default KMS key for each Amazon Web\nServices Region.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" masterUserSecretKMSKeyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" masterUsername: - description: "The name of the master user for the DB cluster. \n Constraints: \n * Must be 1 to 16 letters or numbers. \n * First character must be a letter. \n * Can't be a reserved word for the chosen database engine. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The name of the master user for the DB cluster.\n\n\nConstraints:\n\n\n * Must be 1 to 16 letters or numbers.\n\n\n * First character must be a letter.\n\n\n * Can't be a reserved word for the chosen database engine.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" monitoringInterval: - description: "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. 
To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0. \n If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. \n Valid Values: 0, 1, 5, 10, 15, 30, 60 \n Valid for: Multi-AZ DB clusters only" + description: "The interval, in seconds, between points when Enhanced Monitoring metrics\nare collected for the DB cluster. To turn off collecting Enhanced Monitoring\nmetrics, specify 0. The default is 0.\n\n\nIf MonitoringRoleArn is specified, also set MonitoringInterval to a value\nother than 0.\n\n\nValid Values: 0, 1, 5, 10, 15, 30, 60\n\n\nValid for: Multi-AZ DB clusters only" format: "int64" type: "integer" monitoringRoleARN: - description: "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. \n If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. \n Valid for: Multi-AZ DB clusters only" + description: "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send\nEnhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess.\nFor information on creating a monitoring role, see Setting up and enabling\nEnhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling)\nin the Amazon RDS User Guide.\n\n\nIf MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn\nvalue.\n\n\nValid for: Multi-AZ DB clusters only" type: "string" networkType: - description: "The network type of the DB cluster. \n Valid values: \n * IPV4 \n * DUAL \n The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). \n For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon Aurora User Guide. \n Valid for: Aurora DB clusters only" + description: "The network type of the DB cluster.\n\n\nValid values:\n\n\n * IPV4\n\n\n * DUAL\n\n\nThe network type is determined by the DBSubnetGroup specified for the DB\ncluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and\nthe IPv6 protocols (DUAL).\n\n\nFor more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html)\nin the Amazon Aurora User Guide.\n\n\nValid for: Aurora DB clusters only" type: "string" optionGroupName: - description: "A value that indicates that the DB cluster should be associated with the specified option group. \n DB clusters are associated with a default option group that can't be modified." + description: "A value that indicates that the DB cluster should be associated with the\nspecified option group.\n\n\nDB clusters are associated with a default option group that can't be modified." type: "string" performanceInsightsKMSKeyID: - description: "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. 
\n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. \n If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. \n Valid for: Multi-AZ DB clusters only" + description: "The Amazon Web Services KMS key identifier for encryption of Performance\nInsights data.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key.\n\n\nIf you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon\nRDS uses your default KMS key. There is a default KMS key for your Amazon\nWeb Services account. Your Amazon Web Services account has a different default\nKMS key for each Amazon Web Services Region.\n\n\nValid for: Multi-AZ DB clusters only" type: "string" performanceInsightsRetentionPeriod: - description: "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid: \n * 7 \n * month * 31, where month is a number of months from 1-23 \n * 731 \n For example, the following values are valid: \n * 93 (3 months * 31) \n * 341 (11 months * 31) \n * 589 (19 months * 31) \n * 731 \n If you specify a retention period such as 94, which isn't a valid value, RDS issues an error. \n Valid for: Multi-AZ DB clusters only" + description: "The number of days to retain Performance Insights data. The default is 7\ndays. The following values are valid:\n\n\n * 7\n\n\n * month * 31, where month is a number of months from 1-23\n\n\n * 731\n\n\nFor example, the following values are valid:\n\n\n * 93 (3 months * 31)\n\n\n * 341 (11 months * 31)\n\n\n * 589 (19 months * 31)\n\n\n * 731\n\n\nIf you specify a retention period such as 94, which isn't a valid value,\nRDS issues an error.\n\n\nValid for: Multi-AZ DB clusters only" format: "int64" type: "integer" port: - description: "The port number on which the instances in the DB cluster accept connections. \n RDS for MySQL and Aurora MySQL \n Default: 3306 \n Valid values: 1150-65535 \n RDS for PostgreSQL and Aurora PostgreSQL \n Default: 5432 \n Valid values: 1150-65535 \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The port number on which the instances in the DB cluster accept connections.\n\n\nRDS for MySQL and Aurora MySQL\n\n\nDefault: 3306\n\n\nValid values: 1150-65535\n\n\nRDS for PostgreSQL and Aurora PostgreSQL\n\n\nDefault: 5432\n\n\nValid values: 1150-65535\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" format: "int64" type: "integer" preSignedURL: - description: "When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster. \n The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy. \n The presigned URL request must contain the following parameter values: \n * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. 
This should refer to the same KMS key for both the CreateDBCluster operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. \n * DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will be created in. \n * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1. \n To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). \n If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. \n Valid for: Aurora DB clusters only" + description: "When you are replicating a DB cluster from one Amazon Web Services GovCloud\n(US) Region to another, an URL that contains a Signature Version 4 signed\nrequest for the CreateDBCluster operation to be called in the source Amazon\nWeb Services Region where the DB cluster is replicated from. Specify PreSignedUrl\nonly when you are performing cross-Region replication from an encrypted DB\ncluster.\n\n\nThe presigned URL must be a valid request for the CreateDBCluster API operation\nthat can run in the source Amazon Web Services Region that contains the encrypted\nDB cluster to copy.\n\n\nThe presigned URL request must contain the following parameter values:\n\n\n * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt\n the copy of the DB cluster in the destination Amazon Web Services Region.\n This should refer to the same KMS key for both the CreateDBCluster operation\n that is called in the destination Amazon Web Services Region, and the\n operation contained in the presigned URL.\n\n\n * DestinationRegion - The name of the Amazon Web Services Region that\n Aurora read replica will be created in.\n\n\n * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted\n DB cluster to be copied. This identifier must be in the Amazon Resource\n Name (ARN) format for the source Amazon Web Services Region. 
For example,\n if you are copying an encrypted DB cluster from the us-west-2 Amazon Web\n Services Region, then your ReplicationSourceIdentifier would look like\n Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.\n\n\nTo learn how to generate a Signature Version 4 signed request, see Authenticating\nRequests: Using Query Parameters (Amazon Web Services Signature Version 4)\n(https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)\nand Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).\n\n\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\nSourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl\nmanually. Specifying SourceRegion autogenerates a presigned URL that is a\nvalid request for the operation that can run in the source Amazon Web Services\nRegion.\n\n\nValid for: Aurora DB clusters only" type: "string" preferredBackupWindow: - description: "The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. \n The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the Amazon Aurora User Guide. \n Constraints: \n * Must be in the format hh24:mi-hh24:mi. \n * Must be in Universal Coordinated Time (UTC). \n * Must not conflict with the preferred maintenance window. \n * Must be at least 30 minutes. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The daily time range during which automated backups are created if automated\nbackups are enabled using the BackupRetentionPeriod parameter.\n\n\nThe default is a 30-minute window selected at random from an 8-hour block\nof time for each Amazon Web Services Region. To view the time blocks available,\nsee Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow)\nin the Amazon Aurora User Guide.\n\n\nConstraints:\n\n\n * Must be in the format hh24:mi-hh24:mi.\n\n\n * Must be in Universal Coordinated Time (UTC).\n\n\n * Must not conflict with the preferred maintenance window.\n\n\n * Must be at least 30 minutes.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" preferredMaintenanceWindow: - description: "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). \n Format: ddd:hh24:mi-ddd:hh24:mi \n The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the Amazon Aurora User Guide. \n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. \n Constraints: Minimum 30-minute window. 
\n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The weekly time range during which system maintenance can occur, in Universal\nCoordinated Time (UTC).\n\n\nFormat: ddd:hh24:mi-ddd:hh24:mi\n\n\nThe default is a 30-minute window selected at random from an 8-hour block\nof time for each Amazon Web Services Region, occurring on a random day of\nthe week. To see the time blocks available, see Adjusting the Preferred DB\nCluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora)\nin the Amazon Aurora User Guide.\n\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\n\nConstraints: Minimum 30-minute window.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" publiclyAccessible: - description: "A value that indicates whether the DB cluster is publicly accessible. \n When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. \n When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. \n Default: The default behavior varies depending on whether DBSubnetGroupName is specified. \n If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: \n * If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. \n * If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. \n If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: \n * If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. \n * If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. \n Valid for: Multi-AZ DB clusters only" + description: "A value that indicates whether the DB cluster is publicly accessible.\n\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS)\nendpoint resolves to the private IP address from within the DB cluster's\nvirtual private cloud (VPC). It resolves to the public IP address from outside\nof the DB cluster's VPC. Access to the DB cluster is ultimately controlled\nby the security group it uses. 
That public access isn't permitted if the\nsecurity group assigned to the DB cluster doesn't permit it.\n\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster\nwith a DNS name that resolves to a private IP address.\n\n\nDefault: The default behavior varies depending on whether DBSubnetGroupName\nis specified.\n\n\nIf DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified,\nthe following applies:\n\n\n * If the default VPC in the target Region doesn’t have an internet gateway\n attached to it, the DB cluster is private.\n\n\n * If the default VPC in the target Region has an internet gateway attached\n to it, the DB cluster is public.\n\n\nIf DBSubnetGroupName is specified, and PubliclyAccessible isn't specified,\nthe following applies:\n\n\n * If the subnets are part of a VPC that doesn’t have an internet gateway\n attached to it, the DB cluster is private.\n\n\n * If the subnets are part of a VPC that has an internet gateway attached\n to it, the DB cluster is public.\n\n\nValid for: Multi-AZ DB clusters only" type: "boolean" replicationSourceIdentifier: - description: "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if\nthis DB cluster is created as a read replica.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" scalingConfiguration: - description: "For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. \n Valid for: Aurora DB clusters only" + description: "For DB clusters in serverless DB engine mode, the scaling properties of the\nDB cluster.\n\n\nValid for: Aurora DB clusters only" properties: autoPause: type: "boolean" @@ -246,7 +246,7 @@ spec: type: "string" type: "object" serverlessV2ScalingConfiguration: - description: "Contains the scaling configuration of an Aurora Serverless v2 DB cluster. \n For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) in the Amazon Aurora User Guide." + description: "Contains the scaling configuration of an Aurora Serverless v2 DB cluster.\n\n\nFor more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html)\nin the Amazon Aurora User Guide." properties: maxCapacity: type: "number" @@ -254,21 +254,21 @@ spec: type: "number" type: "object" snapshotIdentifier: - description: "The identifier for the DB snapshot or DB cluster snapshot to restore from. \n You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot. \n Constraints: \n * Must match the identifier of an existing Snapshot. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify\na DB cluster snapshot. However, you can use only the ARN to specify a DB\nsnapshot.\n\n\nConstraints:\n\n\n * Must match the identifier of an existing Snapshot.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "string" sourceRegion: - description: "SourceRegion is the source region where the resource exists. 
This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN." + description: "SourceRegion is the source region where the resource exists. This is not\nsent over the wire and is only used for presigning. This value should always\nhave the same region as the source ARN." type: "string" storageEncrypted: - description: "A value that indicates whether the DB cluster is encrypted. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A value that indicates whether the DB cluster is encrypted.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" type: "boolean" storageType: - description: "Specifies the storage type to be associated with the DB cluster. \n This setting is required to create a Multi-AZ DB cluster. \n Valid values: io1 \n When specified, a value for the Iops parameter is required. \n Default: io1 \n Valid for: Multi-AZ DB clusters only" + description: "Specifies the storage type to be associated with the DB cluster.\n\n\nThis setting is required to create a Multi-AZ DB cluster.\n\n\nValid values: io1\n\n\nWhen specified, a value for the Iops parameter is required.\n\n\nDefault: io1\n\n\nValid for: Multi-AZ DB clusters only" type: "string" tags: - description: "Tags to assign to the DB cluster. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "Tags to assign to the DB cluster.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -277,16 +277,16 @@ spec: type: "object" type: "array" vpcSecurityGroupIDs: - description: "A list of EC2 VPC security groups to associate with this DB cluster. 
\n Valid for: Aurora DB clusters and Multi-AZ DB clusters" + description: "A list of EC2 VPC security groups to associate with this DB cluster.\n\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" items: type: "string" type: "array" vpcSecurityGroupRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -301,13 +301,13 @@ spec: description: "DBClusterStatus defines the observed state of DBCluster" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -317,21 +317,21 @@ spec: - "region" type: "object" activityStreamKMSKeyID: - description: "The Amazon Web Services KMS key identifier used for encrypting messages in the database activity stream. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key." 
+ description: "The Amazon Web Services KMS key identifier used for encrypting messages in\nthe database activity stream.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key." type: "string" activityStreamKinesisStreamName: - description: "The name of the Amazon Kinesis data stream used for the database activity stream." + description: "The name of the Amazon Kinesis data stream used for the database activity\nstream." type: "string" activityStreamMode: - description: "The mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously." + description: "The mode of the database activity stream. Database events such as a change\nor access generate an activity stream event. The database session can handle\nthese events either synchronously or asynchronously." type: "string" activityStreamStatus: description: "The status of the database activity stream." type: "string" associatedRoles: - description: "Provides a list of the Amazon Web Services Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf." + description: "Provides a list of the Amazon Web Services Identity and Access Management\n(IAM) roles that are associated with the DB cluster. IAM roles that are associated\nwith a DB cluster grant permission for the DB cluster to access other Amazon\nWeb Services on your behalf." items: - description: "Describes an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB cluster." + description: "Describes an Amazon Web Services Identity and Access Management (IAM) role\nthat is associated with a DB cluster." properties: featureName: type: "string" @@ -350,20 +350,20 @@ spec: format: "int64" type: "integer" capacity: - description: "The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused. \n For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the Amazon Aurora User Guide." + description: "The current capacity of an Aurora Serverless v1 DB cluster. The capacity\nis 0 (zero) when the cluster is paused.\n\n\nFor more information about Aurora Serverless v1, see Using Amazon Aurora\nServerless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html)\nin the Amazon Aurora User Guide." format: "int64" type: "integer" cloneGroupID: description: "Identifies the clone group to which the DB cluster is associated." type: "string" clusterCreateTime: - description: "Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC)." + description: "Specifies the time when the DB cluster was created, in Universal Coordinated\nTime (UTC)." 
format: "date-time" type: "string" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -387,7 +387,7 @@ spec: type: "object" type: "array" crossAccountClone: - description: "Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account." + description: "Specifies whether the DB cluster is a clone of a DB cluster owned by a different\nAmazon Web Services account." type: "boolean" customEndpoints: description: "Identifies all custom endpoints associated with the cluster." @@ -425,15 +425,15 @@ spec: description: "Specifies the name of the DB cluster parameter group for the DB cluster." type: "string" dbClusterResourceID: - description: "The Amazon Web Services Region-unique, immutable identifier for the DB cluster. This identifier is found in Amazon Web Services CloudTrail log entries whenever the KMS key for the DB cluster is accessed." + description: "The Amazon Web Services Region-unique, immutable identifier for the DB cluster.\nThis identifier is found in Amazon Web Services CloudTrail log entries whenever\nthe KMS key for the DB cluster is accessed." type: "string" dbSubnetGroup: - description: "Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group." + description: "Specifies information on the subnet group associated with the DB cluster,\nincluding the name, description, and subnets in the subnet group." type: "string" domainMemberships: description: "The Active Directory Domain membership records associated with the DB cluster." items: - description: "An Active Directory Domain membership record associated with the DB instance or cluster." + description: "An Active Directory Domain membership record associated with the DB instance\nor cluster." properties: domain: type: "string" @@ -450,11 +450,11 @@ spec: format: "date-time" type: "string" earliestRestorableTime: - description: "The earliest time to which a database can be restored with point-in-time restore." + description: "The earliest time to which a database can be restored with point-in-time\nrestore." format: "date-time" type: "string" enabledCloudwatchLogsExports: - description: "A list of log types that this DB cluster is configured to export to CloudWatch Logs. \n Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html) in the Amazon Aurora User Guide." 
+ description: "A list of log types that this DB cluster is configured to export to CloudWatch\nLogs.\n\n\nLog types vary by DB engine. For information about the log types for each\nDB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html)\nin the Amazon Aurora User Guide." items: type: "string" type: "array" @@ -462,26 +462,26 @@ spec: description: "Specifies the connection endpoint for the primary instance of the DB cluster." type: "string" globalWriteForwardingRequested: - description: "Specifies whether you have requested to enable write forwarding for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster." + description: "Specifies whether you have requested to enable write forwarding for a secondary\ncluster in an Aurora global database. Because write forwarding takes time\nto enable, check the value of GlobalWriteForwardingStatus to confirm that\nthe request has completed before using the write forwarding feature for this\ncluster." type: "boolean" globalWriteForwardingStatus: - description: "Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it." + description: "Specifies whether a secondary cluster in an Aurora global database has write\nforwarding enabled, not enabled, or is in the process of enabling it." type: "string" hostedZoneID: description: "Specifies the ID that Amazon Route 53 assigns when you create a hosted zone." type: "string" httpEndpointEnabled: - description: "A value that indicates whether the HTTP endpoint for an Aurora Serverless v1 DB cluster is enabled. \n When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor. \n For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the Amazon Aurora User Guide." + description: "A value that indicates whether the HTTP endpoint for an Aurora Serverless\nv1 DB cluster is enabled.\n\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API\nfor running SQL queries on the Aurora Serverless v1 DB cluster. You can also\nquery your database from inside the RDS console with the query editor.\n\n\nFor more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html)\nin the Amazon Aurora User Guide." type: "boolean" iamDatabaseAuthenticationEnabled: - description: "A value that indicates whether the mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled." + description: "A value that indicates whether the mapping of Amazon Web Services Identity\nand Access Management (IAM) accounts to database accounts is enabled." type: "boolean" latestRestorableTime: - description: "Specifies the latest time to which a database can be restored with point-in-time restore." + description: "Specifies the latest time to which a database can be restored with point-in-time\nrestore." 
format: "date-time" type: "string" masterUserSecret: - description: "Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. \n For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide." + description: "Contains the secret managed by RDS in Amazon Web Services Secrets Manager\nfor the master user password.\n\n\nFor more information, see Password management with Amazon Web Services Secrets\nManager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html)\nin the Amazon RDS User Guide and Password management with Amazon Web Services\nSecrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html)\nin the Amazon Aurora User Guide." properties: kmsKeyID: type: "string" @@ -494,7 +494,7 @@ spec: description: "Specifies whether the DB cluster has instances in multiple Availability Zones." type: "boolean" pendingModifiedValues: - description: "A value that specifies that changes to the DB cluster are pending. This element is only included when changes are pending. Specific changes are identified by subelements." + description: "A value that specifies that changes to the DB cluster are pending. This element\nis only included when changes are pending. Specific changes are identified\nby subelements." properties: allocatedStorage: format: "int64" @@ -514,7 +514,7 @@ spec: masterUserPassword: type: "string" pendingCloudwatchLogsExports: - description: "A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated." + description: "A list of the log types whose configuration is still pending. In other words,\nthese log types are in the process of being activated or deactivated." properties: logTypesToDisable: items: @@ -530,22 +530,22 @@ spec: description: "Specifies the progress of the operation as a percentage." type: "string" performanceInsightsEnabled: - description: "True if Performance Insights is enabled for the DB cluster, and otherwise false. \n This setting is only for non-Aurora Multi-AZ DB clusters." + description: "True if Performance Insights is enabled for the DB cluster, and otherwise\nfalse.\n\n\nThis setting is only for non-Aurora Multi-AZ DB clusters." type: "boolean" readReplicaIdentifiers: - description: "Contains one or more identifiers of the read replicas associated with this DB cluster." + description: "Contains one or more identifiers of the read replicas associated with this\nDB cluster." items: type: "string" type: "array" readerEndpoint: - description: "The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load-balances connections across the Aurora Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Aurora distributes the connection requests among the Aurora Replicas in the DB cluster. This functionality can help balance your read workload across multiple Aurora Replicas in your DB cluster. \n If a failover occurs, and the Aurora Replica that you are connected to is promoted to be the primary instance, your connection is dropped. 
To continue sending your read workload to other Aurora Replicas in the cluster, you can then reconnect to the reader endpoint." + description: "The reader endpoint for the DB cluster. The reader endpoint for a DB cluster\nload-balances connections across the Aurora Replicas that are available in\na DB cluster. As clients request new connections to the reader endpoint,\nAurora distributes the connection requests among the Aurora Replicas in the\nDB cluster. This functionality can help balance your read workload across\nmultiple Aurora Replicas in your DB cluster.\n\n\nIf a failover occurs, and the Aurora Replica that you are connected to is\npromoted to be the primary instance, your connection is dropped. To continue\nsending your read workload to other Aurora Replicas in the cluster, you can\nthen reconnect to the reader endpoint." type: "string" status: description: "Specifies the current state of this DB cluster." type: "string" tagList: items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -556,7 +556,7 @@ spec: vpcSecurityGroups: description: "Provides a list of VPC security groups that the DB cluster belongs to." items: - description: "This data type is used as a response element for queries on VPC security group membership." + description: "This data type is used as a response element for queries on VPC security\ngroup membership." properties: status: type: "string" diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbinstances.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbinstances.yaml index 6be41cd78..2e17e0534 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbinstances.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbinstances.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbinstances.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -23,143 +23,143 @@ spec: description: "DBInstance is the Schema for the DBInstances API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBInstanceSpec defines the desired state of DBInstance. \n Contains the details of an Amazon RDS DB instance. \n This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance." + description: "DBInstanceSpec defines the desired state of DBInstance.\n\n\nContains the details of an Amazon RDS DB instance.\n\n\nThis data type is used as a response element in the operations CreateDBInstance,\nCreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance,\nPromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3,\nRestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance." properties: allocatedStorage: - description: "The amount of storage in gibibytes (GiB) to allocate for the DB instance. \n Type: Integer \n Amazon Aurora \n Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume. \n Amazon RDS Custom \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. \n * Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. \n MySQL \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. \n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. \n * Magnetic storage (standard): Must be an integer from 5 to 3072. \n MariaDB \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. \n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. \n * Magnetic storage (standard): Must be an integer from 5 to 3072. \n PostgreSQL \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. \n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. \n * Magnetic storage (standard): Must be an integer from 5 to 3072. \n Oracle \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. \n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. \n * Magnetic storage (standard): Must be an integer from 10 to 3072. 
\n SQL Server \n Constraints to the amount of storage for each storage type are the following: \n * General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions: Must be an integer from 20 to 16384. Web and Express editions: Must be an integer from 20 to 16384. \n * Provisioned IOPS storage (io1): Enterprise and Standard editions: Must be an integer from 100 to 16384. Web and Express editions: Must be an integer from 100 to 16384. \n * Magnetic storage (standard): Enterprise and Standard editions: Must be an integer from 20 to 1024. Web and Express editions: Must be an integer from 20 to 1024." + description: "The amount of storage in gibibytes (GiB) to allocate for the DB instance.\n\n\nType: Integer\n\n\nAmazon Aurora\n\n\nNot applicable. Aurora cluster volumes automatically grow as the amount of\ndata in your database increases, though you are only charged for the space\nthat you use in an Aurora cluster volume.\n\n\nAmazon RDS Custom\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40\n to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.\n\n\n * Provisioned IOPS storage (io1): Must be an integer from 40 to 65536\n for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.\n\n\nMySQL\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20\n to 65536.\n\n\n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.\n\n\n * Magnetic storage (standard): Must be an integer from 5 to 3072.\n\n\nMariaDB\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20\n to 65536.\n\n\n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.\n\n\n * Magnetic storage (standard): Must be an integer from 5 to 3072.\n\n\nPostgreSQL\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20\n to 65536.\n\n\n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.\n\n\n * Magnetic storage (standard): Must be an integer from 5 to 3072.\n\n\nOracle\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20\n to 65536.\n\n\n * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.\n\n\n * Magnetic storage (standard): Must be an integer from 10 to 3072.\n\n\nSQL Server\n\n\nConstraints to the amount of storage for each storage type are the following:\n\n\n * General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions:\n Must be an integer from 20 to 16384. Web and Express editions: Must be\n an integer from 20 to 16384.\n\n\n * Provisioned IOPS storage (io1): Enterprise and Standard editions: Must\n be an integer from 100 to 16384. Web and Express editions: Must be an\n integer from 100 to 16384.\n\n\n * Magnetic storage (standard): Enterprise and Standard editions: Must\n be an integer from 20 to 1024. Web and Express editions: Must be an integer\n from 20 to 1024." format: "int64" type: "integer" autoMinorVersionUpgrade: - description: "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. 
By default, minor engine upgrades are applied automatically. \n If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false." + description: "A value that indicates whether minor engine upgrades are applied automatically\nto the DB instance during the maintenance window. By default, minor engine\nupgrades are applied automatically.\n\n\nIf you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade\nto false." type: "boolean" availabilityZone: - description: "The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). \n Amazon Aurora \n Each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. \n Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. \n Example: us-east-1d \n Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint." + description: "The Availability Zone (AZ) where the database will be created. For information\non Amazon Web Services Regions and Availability Zones, see Regions and Availability\nZones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).\n\n\nAmazon Aurora\n\n\nEach Aurora DB cluster hosts copies of its storage in three separate Availability\nZones. Specify one of these Availability Zones. Aurora automatically chooses\nan appropriate Availability Zone if you don't specify one.\n\n\nDefault: A random, system-chosen Availability Zone in the endpoint's Amazon\nWeb Services Region.\n\n\nExample: us-east-1d\n\n\nConstraint: The AvailabilityZone parameter can't be specified if the DB instance\nis a Multi-AZ deployment. The specified Availability Zone must be in the\nsame Amazon Web Services Region as the current endpoint." type: "string" backupRetentionPeriod: - description: "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. \n Amazon Aurora \n Not applicable. The retention period for automated backups is managed by the DB cluster. \n Default: 1 \n Constraints: \n * Must be a value from 0 to 35 \n * Can't be set to 0 if the DB instance is a source to read replicas \n * Can't be set to 0 for an RDS Custom for Oracle DB instance" + description: "The number of days for which automated backups are retained. Setting this\nparameter to a positive number enables backups. Setting this parameter to\n0 disables automated backups.\n\n\nAmazon Aurora\n\n\nNot applicable. The retention period for automated backups is managed by\nthe DB cluster.\n\n\nDefault: 1\n\n\nConstraints:\n\n\n * Must be a value from 0 to 35\n\n\n * Can't be set to 0 if the DB instance is a source to read replicas\n\n\n * Can't be set to 0 for an RDS Custom for Oracle DB instance" format: "int64" type: "integer" backupTarget: - description: "Specifies where automated backups and manual snapshots are stored. \n Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. 
\n For more information, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide." + description: "Specifies where automated backups and manual snapshots are stored.\n\n\nPossible values are outposts (Amazon Web Services Outposts) and region (Amazon\nWeb Services Region). The default is region.\n\n\nFor more information, see Working with Amazon RDS on Amazon Web Services\nOutposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html)\nin the Amazon RDS User Guide." type: "string" caCertificateIdentifier: - description: "Specifies the CA certificate identifier to use for the DB instance’s server certificate. \n This setting doesn't apply to RDS Custom. \n For more information, see Using SSL/TLS to encrypt a connection to a DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the Amazon Aurora User Guide." + description: "Specifies the CA certificate identifier to use for the DB instance’s server\ncertificate.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nFor more information, see Using SSL/TLS to encrypt a connection to a DB instance\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html)\nin the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to\na DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html)\nin the Amazon Aurora User Guide." type: "string" characterSetName: - description: "For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet. \n This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database itself. \n Amazon Aurora \n Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster." + description: "For supported engines, this value indicates that the DB instance should be\nassociated with the specified CharacterSet.\n\n\nThis setting doesn't apply to RDS Custom. However, if you need to change\nthe character set, you can change it on the database itself.\n\n\nAmazon Aurora\n\n\nNot applicable. The character set is managed by the DB cluster. For more\ninformation, see CreateDBCluster." type: "string" copyTagsToSnapshot: - description: "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. \n Amazon Aurora \n Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting." + description: "A value that indicates whether to copy tags from the DB instance to snapshots\nof the DB instance. By default, tags are not copied.\n\n\nAmazon Aurora\n\n\nNot applicable. Copying tags to snapshots is managed by the DB cluster. Setting\nthis value for an Aurora DB instance has no effect on the DB cluster setting." type: "boolean" customIAMInstanceProfile: - description: "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: \n * The profile must exist in your account. 
\n * The profile must have an IAM role that Amazon EC2 has permissions to assume. \n * The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. \n For the list of permissions required for the IAM role, see Configure IAM and your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the Amazon RDS User Guide. \n This setting is required for RDS Custom." + description: "The instance profile associated with the underlying Amazon EC2 instance of\nan RDS Custom DB instance. The instance profile must meet the following requirements:\n\n\n * The profile must exist in your account.\n\n\n * The profile must have an IAM role that Amazon EC2 has permissions to\n assume.\n\n\n * The instance profile name and the associated IAM role name must start\n with the prefix AWSRDSCustom.\n\n\nFor the list of permissions required for the IAM role, see Configure IAM\nand your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc)\nin the Amazon RDS User Guide.\n\n\nThis setting is required for RDS Custom." type: "string" dbClusterIdentifier: - description: "The identifier of the DB cluster that the instance will belong to. \n This setting doesn't apply to RDS Custom." + description: "The identifier of the DB cluster that the instance will belong to.\n\n\nThis setting doesn't apply to RDS Custom." type: "string" dbClusterSnapshotIdentifier: - description: "The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from. \n For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the Amazon RDS User Guide. \n Constraints: \n * Must match the identifier of an existing Multi-AZ DB cluster snapshot. \n * Can't be specified when DBSnapshotIdentifier is specified. \n * Must be specified when DBSnapshotIdentifier isn't specified. \n * If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot. \n * Can't be the identifier of an Aurora DB cluster snapshot. \n * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot." + description: "The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore\nfrom.\n\n\nFor more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html)\nin the Amazon RDS User Guide.\n\n\nConstraints:\n\n\n * Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n\n\n * Can't be specified when DBSnapshotIdentifier is specified.\n\n\n * Must be specified when DBSnapshotIdentifier isn't specified.\n\n\n * If you are restoring from a shared manual Multi-AZ DB cluster snapshot,\n the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot.\n\n\n * Can't be the identifier of an Aurora DB cluster snapshot.\n\n\n * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster\n snapshot." type: "string" dbInstanceClass: - description: "The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. 
For the full list of DB instance classes, and availability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the Amazon Aurora User Guide." + description: "The compute and memory capacity of the DB instance, for example db.m5.large.\nNot all DB instance classes are available in all Amazon Web Services Regions,\nor for all database engines. For the full list of DB instance classes, and\navailability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html)\nin the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html)\nin the Amazon Aurora User Guide." type: "string" dbInstanceIdentifier: - description: "The DB instance identifier. This parameter is stored as a lowercase string. \n Constraints: \n * Must contain from 1 to 63 letters, numbers, or hyphens. \n * First character must be a letter. \n * Can't end with a hyphen or contain two consecutive hyphens. \n Example: mydbinstance" + description: "The DB instance identifier. This parameter is stored as a lowercase string.\n\n\nConstraints:\n\n\n * Must contain from 1 to 63 letters, numbers, or hyphens.\n\n\n * First character must be a letter.\n\n\n * Can't end with a hyphen or contain two consecutive hyphens.\n\n\nExample: mydbinstance" type: "string" dbName: - description: "The meaning of this parameter differs according to the database engine you use. \n MySQL \n The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. \n Constraints: \n * Must contain 1 to 64 letters or numbers. \n * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). \n * Can't be a word reserved by the specified database engine \n MariaDB \n The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. \n Constraints: \n * Must contain 1 to 64 letters or numbers. \n * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). \n * Can't be a word reserved by the specified database engine \n PostgreSQL \n The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance. \n Constraints: \n * Must contain 1 to 63 letters, numbers, or underscores. \n * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). \n * Can't be a word reserved by the specified database engine \n Oracle \n The Oracle System ID (SID) of the created DB instance. If you specify null, the default value ORCL is used. You can't specify the string NULL, or any other reserved word, for DBName. \n Default: ORCL \n Constraints: \n * Can't be longer than 8 characters \n Amazon RDS Custom for Oracle \n The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL. \n Default: ORCL \n Constraints: \n * It must contain 1 to 8 alphanumeric characters. \n * It must contain a letter. \n * It can't be a word reserved by the database engine. \n Amazon RDS Custom for SQL Server \n Not applicable. 
Must be null. \n SQL Server \n Not applicable. Must be null. \n Amazon Aurora MySQL \n The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster. \n Constraints: \n * It must contain 1 to 64 alphanumeric characters. \n * It can't be a word reserved by the database engine. \n Amazon Aurora PostgreSQL \n The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster. \n Constraints: \n * It must contain 1 to 63 alphanumeric characters. \n * It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). \n * It can't be a word reserved by the database engine." + description: "The meaning of this parameter differs according to the database engine you\nuse.\n\n\nMySQL\n\n\nThe name of the database to create when the DB instance is created. If this\nparameter isn't specified, no database is created in the DB instance.\n\n\nConstraints:\n\n\n * Must contain 1 to 64 letters or numbers.\n\n\n * Must begin with a letter. Subsequent characters can be letters, underscores,\n or digits (0-9).\n\n\n * Can't be a word reserved by the specified database engine\n\n\nMariaDB\n\n\nThe name of the database to create when the DB instance is created. If this\nparameter isn't specified, no database is created in the DB instance.\n\n\nConstraints:\n\n\n * Must contain 1 to 64 letters or numbers.\n\n\n * Must begin with a letter. Subsequent characters can be letters, underscores,\n or digits (0-9).\n\n\n * Can't be a word reserved by the specified database engine\n\n\nPostgreSQL\n\n\nThe name of the database to create when the DB instance is created. If this\nparameter isn't specified, a database named postgres is created in the DB\ninstance.\n\n\nConstraints:\n\n\n * Must contain 1 to 63 letters, numbers, or underscores.\n\n\n * Must begin with a letter. Subsequent characters can be letters, underscores,\n or digits (0-9).\n\n\n * Can't be a word reserved by the specified database engine\n\n\nOracle\n\n\nThe Oracle System ID (SID) of the created DB instance. If you specify null,\nthe default value ORCL is used. You can't specify the string NULL, or any\nother reserved word, for DBName.\n\n\nDefault: ORCL\n\n\nConstraints:\n\n\n * Can't be longer than 8 characters\n\n\nAmazon RDS Custom for Oracle\n\n\nThe Oracle System ID (SID) of the created RDS Custom DB instance. If you\ndon't specify a value, the default value is ORCL.\n\n\nDefault: ORCL\n\n\nConstraints:\n\n\n * It must contain 1 to 8 alphanumeric characters.\n\n\n * It must contain a letter.\n\n\n * It can't be a word reserved by the database engine.\n\n\nAmazon RDS Custom for SQL Server\n\n\nNot applicable. Must be null.\n\n\nSQL Server\n\n\nNot applicable. Must be null.\n\n\nAmazon Aurora MySQL\n\n\nThe name of the database to create when the primary DB instance of the Aurora\nMySQL DB cluster is created. If this parameter isn't specified for an Aurora\nMySQL DB cluster, no database is created in the DB cluster.\n\n\nConstraints:\n\n\n * It must contain 1 to 64 alphanumeric characters.\n\n\n * It can't be a word reserved by the database engine.\n\n\nAmazon Aurora PostgreSQL\n\n\nThe name of the database to create when the primary DB instance of the Aurora\nPostgreSQL DB cluster is created. 
If this parameter isn't specified for an\nAurora PostgreSQL DB cluster, a database named postgres is created in the\nDB cluster.\n\n\nConstraints:\n\n\n * It must contain 1 to 63 alphanumeric characters.\n\n\n * It must begin with a letter. Subsequent characters can be letters, underscores,\n or digits (0 to 9).\n\n\n * It can't be a word reserved by the database engine." type: "string" dbParameterGroupName: - description: "The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used. \n This setting doesn't apply to RDS Custom. \n Constraints: \n * It must be 1 to 255 letters, numbers, or hyphens. \n * The first character must be a letter. \n * It can't end with a hyphen or contain two consecutive hyphens." + description: "The name of the DB parameter group to associate with this DB instance. If\nyou do not specify a value, then the default DB parameter group for the specified\nDB engine and version is used.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nConstraints:\n\n\n * It must be 1 to 255 letters, numbers, or hyphens.\n\n\n * The first character must be a letter.\n\n\n * It can't end with a hyphen or contain two consecutive hyphens." type: "string" dbParameterGroupRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" dbSnapshotIdentifier: - description: "The identifier for the DB snapshot to restore from. \n Constraints: \n * Must match the identifier of an existing DBSnapshot. \n * Can't be specified when DBClusterSnapshotIdentifier is specified. \n * Must be specified when DBClusterSnapshotIdentifier isn't specified. \n * If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot." + description: "The identifier for the DB snapshot to restore from.\n\n\nConstraints:\n\n\n * Must match the identifier of an existing DBSnapshot.\n\n\n * Can't be specified when DBClusterSnapshotIdentifier is specified.\n\n\n * Must be specified when DBClusterSnapshotIdentifier isn't specified.\n\n\n * If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier\n must be the ARN of the shared DB snapshot." type: "string" dbSubnetGroupName: - description: "A DB subnet group to associate with this DB instance. \n Constraints: Must match the name of an existing DBSubnetGroup. Must not be default. \n Example: mydbsubnetgroup" + description: "A DB subnet group to associate with this DB instance.\n\n\nConstraints: Must match the name of an existing DBSubnetGroup. 
Must not be\ndefault.\n\n\nExample: mydbsubnetgroup" type: "string" dbSubnetGroupRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" deletionProtection: - description: "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). \n Amazon Aurora \n Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster." + description: "A value that indicates whether the DB instance has deletion protection enabled.\nThe database can't be deleted when deletion protection is enabled. By default,\ndeletion protection isn't enabled. For more information, see Deleting a DB\nInstance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html).\n\n\nAmazon Aurora\n\n\nNot applicable. You can enable or disable deletion protection for the DB\ncluster. For more information, see CreateDBCluster. DB instances in a DB\ncluster can be deleted even when deletion protection is enabled for the DB\ncluster." type: "boolean" destinationRegion: description: "DestinationRegion is used for presigning the request to a given region." type: "string" domain: - description: "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. \n For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the Amazon RDS User Guide. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. The domain is managed by the DB cluster." + description: "The Active Directory directory ID to create the DB instance in. Currently,\nonly MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can\nbe created in an Active Directory Domain.\n\n\nFor more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html)\nin the Amazon RDS User Guide.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable. The domain is managed by the DB cluster." type: "string" domainIAMRoleName: - description: "Specify the name of the IAM role to be used when making API calls to the Directory Service. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. The domain is managed by the DB cluster." 
+ description: "Specify the name of the IAM role to be used when making API calls to the\nDirectory Service.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable. The domain is managed by the DB cluster." type: "string" enableCloudwatchLogsExports: - description: "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine. For more information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon RDS User Guide. \n Amazon Aurora \n Not applicable. CloudWatch Logs exports are managed by the DB cluster. \n RDS Custom \n Not applicable. \n MariaDB \n Possible values are audit, error, general, and slowquery. \n Microsoft SQL Server \n Possible values are agent and error. \n MySQL \n Possible values are audit, error, general, and slowquery. \n Oracle \n Possible values are alert, audit, listener, trace, and oemagent. \n PostgreSQL \n Possible values are postgresql and upgrade." + description: "The list of log types that need to be enabled for exporting to CloudWatch\nLogs. The values in the list depend on the DB engine. For more information,\nsee Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch)\nin the Amazon RDS User Guide.\n\n\nAmazon Aurora\n\n\nNot applicable. CloudWatch Logs exports are managed by the DB cluster.\n\n\nRDS Custom\n\n\nNot applicable.\n\n\nMariaDB\n\n\nPossible values are audit, error, general, and slowquery.\n\n\nMicrosoft SQL Server\n\n\nPossible values are agent and error.\n\n\nMySQL\n\n\nPossible values are audit, error, general, and slowquery.\n\n\nOracle\n\n\nPossible values are alert, audit, listener, trace, and oemagent.\n\n\nPostgreSQL\n\n\nPossible values are postgresql and upgrade." items: type: "string" type: "array" enableCustomerOwnedIP: - description: "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. \n A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. \n For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. \n For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) in the Amazon Web Services Outposts User Guide." + description: "A value that indicates whether to enable a customer-owned IP address (CoIP)\nfor an RDS on Outposts DB instance.\n\n\nA CoIP provides local or external connectivity to resources in your Outpost\nsubnets through your on-premises network. 
For some use cases, a CoIP can\nprovide lower latency for connections to the DB instance from outside of\nits virtual private cloud (VPC) on your local network.\n\n\nFor more information about RDS on Outposts, see Working with Amazon RDS on\nAmazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html)\nin the Amazon RDS User Guide.\n\n\nFor more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing)\nin the Amazon Web Services Outposts User Guide." type: "boolean" enableIAMDatabaseAuthentication: - description: "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. \n For more information, see IAM Database Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) in the Amazon RDS User Guide. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster." + description: "A value that indicates whether to enable mapping of Amazon Web Services Identity\nand Access Management (IAM) accounts to database accounts. By default, mapping\nisn't enabled.\n\n\nFor more information, see IAM Database Authentication for MySQL and PostgreSQL\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html)\nin the Amazon RDS User Guide.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable. Mapping Amazon Web Services IAM accounts to database accounts\nis managed by the DB cluster." type: "boolean" engine: - description: "The name of the database engine to be used for this instance. \n Not every database engine is available for every Amazon Web Services Region. \n Valid Values: \n * aurora (for MySQL 5.6-compatible Aurora) \n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) \n * aurora-postgresql \n * custom-oracle-ee (for RDS Custom for Oracle instances) \n * custom-sqlserver-ee (for RDS Custom for SQL Server instances) \n * custom-sqlserver-se (for RDS Custom for SQL Server instances) \n * custom-sqlserver-web (for RDS Custom for SQL Server instances) \n * mariadb \n * mysql \n * oracle-ee \n * oracle-ee-cdb \n * oracle-se2 \n * oracle-se2-cdb \n * postgres \n * sqlserver-ee \n * sqlserver-se \n * sqlserver-ex \n * sqlserver-web" + description: "The name of the database engine to be used for this instance.\n\n\nNot every database engine is available for every Amazon Web Services Region.\n\n\nValid Values:\n\n\n * aurora (for MySQL 5.6-compatible Aurora)\n\n\n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)\n\n\n * aurora-postgresql\n\n\n * custom-oracle-ee (for RDS Custom for Oracle instances)\n\n\n * custom-sqlserver-ee (for RDS Custom for SQL Server instances)\n\n\n * custom-sqlserver-se (for RDS Custom for SQL Server instances)\n\n\n * custom-sqlserver-web (for RDS Custom for SQL Server instances)\n\n\n * mariadb\n\n\n * mysql\n\n\n * oracle-ee\n\n\n * oracle-ee-cdb\n\n\n * oracle-se2\n\n\n * oracle-se2-cdb\n\n\n * postgres\n\n\n * sqlserver-ee\n\n\n * sqlserver-se\n\n\n * sqlserver-ex\n\n\n * sqlserver-web" type: "string" engineVersion: - description: "The version number of the database engine to use. 
\n For a list of valid engine versions, use the DescribeDBEngineVersions operation. \n The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region. \n Amazon Aurora \n Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. \n Amazon RDS Custom for Oracle \n A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-creating.html#custom-creating.create) in the Amazon RDS User Guide. \n Amazon RDS Custom for SQL Server \n See RDS Custom for SQL Server general requirements (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-reqs-limits-MS.html) in the Amazon RDS User Guide. \n MariaDB \n For information, see MariaDB on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt) in the Amazon RDS User Guide. \n Microsoft SQL Server \n For information, see Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) in the Amazon RDS User Guide. \n MySQL \n For information, see MySQL on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the Amazon RDS User Guide. \n Oracle \n For information, see Oracle Database Engine Release Notes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html) in the Amazon RDS User Guide. \n PostgreSQL \n For information, see Amazon RDS for PostgreSQL versions and extensions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the Amazon RDS User Guide." + description: "The version number of the database engine to use.\n\n\nFor a list of valid engine versions, use the DescribeDBEngineVersions operation.\n\n\nThe following are the database engines and links to information about the\nmajor and minor versions that are available with Amazon RDS. Not every database\nengine is available for every Amazon Web Services Region.\n\n\nAmazon Aurora\n\n\nNot applicable. The version number of the database engine to be used by the\nDB instance is managed by the DB cluster.\n\n\nAmazon RDS Custom for Oracle\n\n\nA custom engine version (CEV) that you have previously created. This setting\nis required for RDS Custom for Oracle. The CEV name has the following format:\n19.customized_string. A valid CEV name is 19.my_cev1. 
For more information,\nsee Creating an RDS Custom for Oracle DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-creating.html#custom-creating.create)\nin the Amazon RDS User Guide.\n\n\nAmazon RDS Custom for SQL Server\n\n\nSee RDS Custom for SQL Server general requirements (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-reqs-limits-MS.html)\nin the Amazon RDS User Guide.\n\n\nMariaDB\n\n\nFor information, see MariaDB on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt)\nin the Amazon RDS User Guide.\n\n\nMicrosoft SQL Server\n\n\nFor information, see Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport)\nin the Amazon RDS User Guide.\n\n\nMySQL\n\n\nFor information, see MySQL on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt)\nin the Amazon RDS User Guide.\n\n\nOracle\n\n\nFor information, see Oracle Database Engine Release Notes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html)\nin the Amazon RDS User Guide.\n\n\nPostgreSQL\n\n\nFor information, see Amazon RDS for PostgreSQL versions and extensions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts)\nin the Amazon RDS User Guide." type: "string" iops: - description: "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the Amazon RDS User Guide. \n Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance. \n Amazon Aurora \n Not applicable. Storage is managed by the DB cluster." + description: "The amount of Provisioned IOPS (input/output operations per second) to be\ninitially allocated for the DB instance. For information about valid IOPS\nvalues, see Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html)\nin the Amazon RDS User Guide.\n\n\nConstraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must\nbe a multiple between .5 and 50 of the storage amount for the DB instance.\nFor SQL Server DB instances, must be a multiple between 1 and 50 of the storage\namount for the DB instance.\n\n\nAmazon Aurora\n\n\nNot applicable. Storage is managed by the DB cluster." format: "int64" type: "integer" kmsKeyID: - description: "The Amazon Web Services KMS key identifier for an encrypted DB instance. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. \n Amazon Aurora \n Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster. \n If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. 
Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. \n Amazon RDS Custom \n A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key." + description: "The Amazon Web Services KMS key identifier for an encrypted DB instance.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key. To use a KMS key in a different Amazon\nWeb Services account, specify the key ARN or alias ARN.\n\n\nAmazon Aurora\n\n\nNot applicable. The Amazon Web Services KMS key identifier is managed by\nthe DB cluster. For more information, see CreateDBCluster.\n\n\nIf StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId\nparameter, then Amazon RDS uses your default KMS key. There is a default\nKMS key for your Amazon Web Services account. Your Amazon Web Services account\nhas a different default KMS key for each Amazon Web Services Region.\n\n\nAmazon RDS Custom\n\n\nA KMS key is required for RDS Custom instances. For most RDS engines, if\nyou leave this parameter empty while enabling StorageEncrypted, the engine\nuses the default KMS key. However, RDS Custom doesn't use the default key\nwhen this parameter is empty. You must explicitly specify a key." type: "string" kmsKeyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" licenseModel: - description: "License model information for this DB instance. \n Valid values: license-included | bring-your-own-license | general-public-license \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable." + description: "License model information for this DB instance.\n\n\nValid values: license-included | bring-your-own-license | general-public-license\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable." type: "string" manageMasterUserPassword: - description: "A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. \n For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide. \n Constraints: \n * Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified." 
+ description: "A value that indicates whether to manage the master user password with Amazon\nWeb Services Secrets Manager.\n\n\nFor more information, see Password management with Amazon Web Services Secrets\nManager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html)\nin the Amazon RDS User Guide.\n\n\nConstraints:\n\n\n * Can't manage the master user password with Amazon Web Services Secrets\n Manager if MasterUserPassword is specified." type: "boolean" masterUserPassword: - description: "The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\". \n Amazon Aurora \n Not applicable. The password for the master user is managed by the DB cluster. \n Constraints: Can't be specified if ManageMasterUserPassword is turned on. \n MariaDB \n Constraints: Must contain from 8 to 41 characters. \n Microsoft SQL Server \n Constraints: Must contain from 8 to 128 characters. \n MySQL \n Constraints: Must contain from 8 to 41 characters. \n Oracle \n Constraints: Must contain from 8 to 30 characters. \n PostgreSQL \n Constraints: Must contain from 8 to 128 characters." + description: "The password for the master user. The password can include any printable\nASCII character except \"/\", \"\"\", or \"@\".\n\n\nAmazon Aurora\n\n\nNot applicable. The password for the master user is managed by the DB cluster.\n\n\nConstraints: Can't be specified if ManageMasterUserPassword is turned on.\n\n\nMariaDB\n\n\nConstraints: Must contain from 8 to 41 characters.\n\n\nMicrosoft SQL Server\n\n\nConstraints: Must contain from 8 to 128 characters.\n\n\nMySQL\n\n\nConstraints: Must contain from 8 to 41 characters.\n\n\nOracle\n\n\nConstraints: Must contain from 8 to 30 characters.\n\n\nPostgreSQL\n\n\nConstraints: Must contain from 8 to 128 characters." properties: key: description: "Key is the key within the secret" @@ -175,71 +175,71 @@ spec: type: "object" x-kubernetes-map-type: "atomic" masterUserSecretKMSKeyID: - description: "The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. \n This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. \n If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. \n There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region." + description: "The Amazon Web Services KMS key identifier to encrypt a secret that is automatically\ngenerated and managed in Amazon Web Services Secrets Manager.\n\n\nThis setting is valid only if the master user password is managed by RDS\nin Amazon Web Services Secrets Manager for the DB instance.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key. 
To use a KMS key in a different Amazon\nWeb Services account, specify the key ARN or alias ARN.\n\n\nIf you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager\nKMS key is used to encrypt the secret. If the secret is in a different Amazon\nWeb Services account, then you can't use the aws/secretsmanager KMS key to\nencrypt the secret, and you must use a customer managed KMS key.\n\n\nThere is a default KMS key for your Amazon Web Services account. Your Amazon\nWeb Services account has a different default KMS key for each Amazon Web\nServices Region." type: "string" masterUserSecretKMSKeyRef: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" type: "object" type: "object" masterUsername: - description: "The name for the master user. \n Amazon Aurora \n Not applicable. The name for the master user is managed by the DB cluster. \n Amazon RDS \n Constraints: \n * Required. \n * Must be 1 to 16 letters, numbers, or underscores. \n * First character must be a letter. \n * Can't be a reserved word for the chosen database engine." + description: "The name for the master user.\n\n\nAmazon Aurora\n\n\nNot applicable. The name for the master user is managed by the DB cluster.\n\n\nAmazon RDS\n\n\nConstraints:\n\n\n * Required.\n\n\n * Must be 1 to 16 letters, numbers, or underscores.\n\n\n * First character must be a letter.\n\n\n * Can't be a reserved word for the chosen database engine." type: "string" maxAllocatedStorage: - description: "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. \n For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the Amazon RDS User Guide. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. Storage is managed by the DB cluster." + description: "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically\nscale the storage of the DB instance.\n\n\nFor more information about this setting, including limitations that apply\nto it, see Managing capacity automatically with Amazon RDS storage autoscaling\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling)\nin the Amazon RDS User Guide.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable. Storage is managed by the DB cluster." format: "int64" type: "integer" monitoringInterval: - description: "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0. 
\n If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0. \n This setting doesn't apply to RDS Custom. \n Valid Values: 0, 1, 5, 10, 15, 30, 60" + description: "The interval, in seconds, between points when Enhanced Monitoring metrics\nare collected for the DB instance. To disable collection of Enhanced Monitoring\nmetrics, specify 0. The default is 0.\n\n\nIf MonitoringRoleArn is specified, then you must set MonitoringInterval to\na value other than 0.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nValid Values: 0, 1, 5, 10, 15, 30, 60" format: "int64" type: "integer" monitoringRoleARN: - description: "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. \n If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. \n This setting doesn't apply to RDS Custom." + description: "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics\nto Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess.\nFor information on creating a monitoring role, see Setting Up and Enabling\nEnhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling)\nin the Amazon RDS User Guide.\n\n\nIf MonitoringInterval is set to a value other than 0, then you must supply\na MonitoringRoleArn value.\n\n\nThis setting doesn't apply to RDS Custom." type: "string" multiAZ: - description: "A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster." + description: "A value that indicates whether the DB instance is a Multi-AZ deployment.\nYou can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ\ndeployment.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable. DB instance Availability Zones (AZs) are managed by the DB\ncluster." type: "boolean" ncharCharacterSetName: - description: "The name of the NCHAR character set for the Oracle DB instance. \n This parameter doesn't apply to RDS Custom." + description: "The name of the NCHAR character set for the Oracle DB instance.\n\n\nThis parameter doesn't apply to RDS Custom." type: "string" networkType: - description: "The network type of the DB instance. \n Valid values: \n * IPV4 \n * DUAL \n The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). \n For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon RDS User Guide." + description: "The network type of the DB instance.\n\n\nValid values:\n\n\n * IPV4\n\n\n * DUAL\n\n\nThe network type is determined by the DBSubnetGroup specified for the DB\ninstance. 
A DBSubnetGroup can support only the IPv4 protocol or the IPv4\nand the IPv6 protocols (DUAL).\n\n\nFor more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html)\nin the Amazon RDS User Guide." type: "string" optionGroupName: - description: "A value that indicates that the DB instance should be associated with the specified option group. \n Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable." + description: "A value that indicates that the DB instance should be associated with the\nspecified option group.\n\n\nPermanent options, such as the TDE option for Oracle Advanced Security TDE,\ncan't be removed from an option group. Also, that option group can't be removed\nfrom a DB instance after it is associated with a DB instance.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable." type: "string" performanceInsightsEnabled: - description: "A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the Amazon RDS User Guide. \n This setting doesn't apply to RDS Custom." + description: "A value that indicates whether to enable Performance Insights for the DB\ninstance. For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html)\nin the Amazon RDS User Guide.\n\n\nThis setting doesn't apply to RDS Custom." type: "boolean" performanceInsightsKMSKeyID: - description: "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. \n The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. \n If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. \n This setting doesn't apply to RDS Custom." + description: "The Amazon Web Services KMS key identifier for encryption of Performance\nInsights data.\n\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias\nARN, or alias name for the KMS key.\n\n\nIf you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon\nRDS uses your default KMS key. There is a default KMS key for your Amazon\nWeb Services account. Your Amazon Web Services account has a different default\nKMS key for each Amazon Web Services Region.\n\n\nThis setting doesn't apply to RDS Custom." type: "string" performanceInsightsRetentionPeriod: - description: "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid: \n * 7 \n * month * 31, where month is a number of months from 1-23 \n * 731 \n For example, the following values are valid: \n * 93 (3 months * 31) \n * 341 (11 months * 31) \n * 589 (19 months * 31) \n * 731 \n If you specify a retention period such as 94, which isn't a valid value, RDS issues an error. \n This setting doesn't apply to RDS Custom." 
+ description: "The number of days to retain Performance Insights data. The default is 7\ndays. The following values are valid:\n\n\n * 7\n\n\n * month * 31, where month is a number of months from 1-23\n\n\n * 731\n\n\nFor example, the following values are valid:\n\n\n * 93 (3 months * 31)\n\n\n * 341 (11 months * 31)\n\n\n * 589 (19 months * 31)\n\n\n * 731\n\n\nIf you specify a retention period such as 94, which isn't a valid value,\nRDS issues an error.\n\n\nThis setting doesn't apply to RDS Custom." format: "int64" type: "integer" port: - description: "The port number on which the database accepts connections. \n MySQL \n Default: 3306 \n Valid values: 1150-65535 \n Type: Integer \n MariaDB \n Default: 3306 \n Valid values: 1150-65535 \n Type: Integer \n PostgreSQL \n Default: 5432 \n Valid values: 1150-65535 \n Type: Integer \n Oracle \n Default: 1521 \n Valid values: 1150-65535 \n SQL Server \n Default: 1433 \n Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. \n Amazon Aurora \n Default: 3306 \n Valid values: 1150-65535 \n Type: Integer" + description: "The port number on which the database accepts connections.\n\n\nMySQL\n\n\nDefault: 3306\n\n\nValid values: 1150-65535\n\n\nType: Integer\n\n\nMariaDB\n\n\nDefault: 3306\n\n\nValid values: 1150-65535\n\n\nType: Integer\n\n\nPostgreSQL\n\n\nDefault: 5432\n\n\nValid values: 1150-65535\n\n\nType: Integer\n\n\nOracle\n\n\nDefault: 1521\n\n\nValid values: 1150-65535\n\n\nSQL Server\n\n\nDefault: 1433\n\n\nValid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and\n49152-49156.\n\n\nAmazon Aurora\n\n\nDefault: 3306\n\n\nValid values: 1150-65535\n\n\nType: Integer" format: "int64" type: "integer" preSignedURL: - description: "When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. \n This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. \n This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. \n You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. \n The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: \n * DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. 
For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. \n * KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. \n * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. \n To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). \n If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. \n SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas. \n This setting doesn't apply to RDS Custom." + description: "When you are creating a read replica from one Amazon Web Services GovCloud\n(US) Region to another or from one China Amazon Web Services Region to another,\nthe URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica\nAPI operation in the source Amazon Web Services Region that contains the\nsource DB instance.\n\n\nThis setting applies only to Amazon Web Services GovCloud (US) Regions and\nChina Amazon Web Services Regions. It's ignored in other Amazon Web Services\nRegions.\n\n\nThis setting applies only when replicating from a source DB instance. Source\nDB clusters aren't supported in Amazon Web Services GovCloud (US) Regions\nand China Amazon Web Services Regions.\n\n\nYou must specify this parameter when you create an encrypted read replica\nfrom another Amazon Web Services Region by using the Amazon RDS API. Don't\nspecify PreSignedUrl when you are creating an encrypted read replica in the\nsame Amazon Web Services Region.\n\n\nThe presigned URL must be a valid request for the CreateDBInstanceReadReplica\nAPI operation that can run in the source Amazon Web Services Region that\ncontains the encrypted source DB instance. The presigned URL request must\ncontain the following parameter values:\n\n\n * DestinationRegion - The Amazon Web Services Region that the encrypted\n read replica is created in. 
This Amazon Web Services Region is the same\n one where the CreateDBInstanceReadReplica operation is called that contains\n this presigned URL. For example, if you create an encrypted DB instance\n in the us-west-1 Amazon Web Services Region, from a source DB instance\n in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica\n operation in the us-east-1 Amazon Web Services Region and provide a presigned\n URL that contains a call to the CreateDBInstanceReadReplica operation\n in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion\n in the presigned URL must be set to the us-east-1 Amazon Web Services\n Region.\n\n\n * KmsKeyId - The KMS key identifier for the key to use to encrypt the\n read replica in the destination Amazon Web Services Region. This is the\n same identifier for both the CreateDBInstanceReadReplica operation that\n is called in the destination Amazon Web Services Region, and the operation\n contained in the presigned URL.\n\n\n * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted\n DB instance to be replicated. This identifier must be in the Amazon Resource\n Name (ARN) format for the source Amazon Web Services Region. For example,\n if you are creating an encrypted read replica from a DB instance in the\n us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.\n\n\nTo learn how to generate a Signature Version 4 signed request, see Authenticating\nRequests: Using Query Parameters (Amazon Web Services Signature Version 4)\n(https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)\nand Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).\n\n\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\nSourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl\nmanually. Specifying SourceRegion autogenerates a presigned URL that is a\nvalid request for the operation that can run in the source Amazon Web Services\nRegion.\n\n\nSourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server\ndoesn't support cross-Region read replicas.\n\n\nThis setting doesn't apply to RDS Custom." type: "string" preferredBackupWindow: - description: "The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) in the Amazon RDS User Guide. \n Amazon Aurora \n Not applicable. The daily time range for creating automated backups is managed by the DB cluster. \n Constraints: \n * Must be in the format hh24:mi-hh24:mi. \n * Must be in Universal Coordinated Time (UTC). \n * Must not conflict with the preferred maintenance window. \n * Must be at least 30 minutes." + description: "The daily time range during which automated backups are created if automated\nbackups are enabled, using the BackupRetentionPeriod parameter. The default\nis a 30-minute window selected at random from an 8-hour block of time for\neach Amazon Web Services Region. 
For more information, see Backup window\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow)\nin the Amazon RDS User Guide.\n\n\nAmazon Aurora\n\n\nNot applicable. The daily time range for creating automated backups is managed\nby the DB cluster.\n\n\nConstraints:\n\n\n * Must be in the format hh24:mi-hh24:mi.\n\n\n * Must be in Universal Coordinated Time (UTC).\n\n\n * Must not conflict with the preferred maintenance window.\n\n\n * Must be at least 30 minutes." type: "string" preferredMaintenanceWindow: - description: "The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance). \n Format: ddd:hh24:mi-ddd:hh24:mi \n The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. \n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. \n Constraints: Minimum 30-minute window." + description: "The time range each week during which system maintenance can occur, in Universal\nCoordinated Time (UTC). For more information, see Amazon RDS Maintenance\nWindow (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance).\n\n\nFormat: ddd:hh24:mi-ddd:hh24:mi\n\n\nThe default is a 30-minute window selected at random from an 8-hour block\nof time for each Amazon Web Services Region, occurring on a random day of\nthe week.\n\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\n\nConstraints: Minimum 30-minute window." type: "string" processorFeatures: - description: "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable." + description: "The number of CPU cores and the number of threads per core for the DB instance\nclass of the DB instance.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable." items: - description: "Contains the processor features of a DB instance class. \n To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter. \n You can set the processor features of the DB instance class for a DB instance when you call one of the following actions: \n * CreateDBInstance \n * ModifyDBInstance \n * RestoreDBInstanceFromDBSnapshot \n * RestoreDBInstanceFromS3 \n * RestoreDBInstanceToPointInTime \n You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter. \n In addition, you can use the following actions for DB instance class processor information: \n * DescribeDBInstances \n * DescribeDBSnapshots \n * DescribeValidDBInstanceModifications \n If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met: \n * You are accessing an Oracle DB instance. \n * Your Oracle DB instance class supports configuring the number of CPU cores and threads per core. \n * The current number CPU cores and threads is set to a non-default value. 
\n For more information, see Configuring the Processor of the DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) in the Amazon RDS User Guide." + description: "Contains the processor features of a DB instance class.\n\n\nTo specify the number of CPU cores, use the coreCount feature name for the\nName parameter. To specify the number of threads per core, use the threadsPerCore\nfeature name for the Name parameter.\n\n\nYou can set the processor features of the DB instance class for a DB instance\nwhen you call one of the following actions:\n\n\n * CreateDBInstance\n\n\n * ModifyDBInstance\n\n\n * RestoreDBInstanceFromDBSnapshot\n\n\n * RestoreDBInstanceFromS3\n\n\n * RestoreDBInstanceToPointInTime\n\n\nYou can view the valid processor values for a particular instance class by\ncalling the DescribeOrderableDBInstanceOptions action and specifying the\ninstance class for the DBInstanceClass parameter.\n\n\nIn addition, you can use the following actions for DB instance class processor\ninformation:\n\n\n * DescribeDBInstances\n\n\n * DescribeDBSnapshots\n\n\n * DescribeValidDBInstanceModifications\n\n\nIf you call DescribeDBInstances, ProcessorFeature returns non-null values\nonly if the following conditions are met:\n\n\n * You are accessing an Oracle DB instance.\n\n\n * Your Oracle DB instance class supports configuring the number of CPU\n cores and threads per core.\n\n\n * The current number CPU cores and threads is set to a non-default value.\n\n\nFor more information, see Configuring the Processor of the DB Instance Class\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor)\nin the Amazon RDS User Guide." properties: name: type: "string" @@ -248,35 +248,35 @@ spec: type: "object" type: "array" promotionTier: - description: "A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) in the Amazon Aurora User Guide. \n This setting doesn't apply to RDS Custom. \n Default: 1 \n Valid Values: 0 - 15" + description: "A value that specifies the order in which an Aurora Replica is promoted to\nthe primary instance after a failure of the existing primary instance. For\nmore information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance)\nin the Amazon Aurora User Guide.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nDefault: 1\n\n\nValid Values: 0 - 15" format: "int64" type: "integer" publiclyAccessible: - description: "A value that indicates whether the DB instance is publicly accessible. \n When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. \n When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. 
\n Default: The default behavior varies depending on whether DBSubnetGroupName is specified. \n If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: \n * If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. \n * If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. \n If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: \n * If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. \n * If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public." + description: "A value that indicates whether the DB instance is publicly accessible.\n\n\nWhen the DB instance is publicly accessible, its Domain Name System (DNS)\nendpoint resolves to the private IP address from within the DB instance's\nvirtual private cloud (VPC). It resolves to the public IP address from outside\nof the DB instance's VPC. Access to the DB instance is ultimately controlled\nby the security group it uses. That public access is not permitted if the\nsecurity group assigned to the DB instance doesn't permit it.\n\n\nWhen the DB instance isn't publicly accessible, it is an internal DB instance\nwith a DNS name that resolves to a private IP address.\n\n\nDefault: The default behavior varies depending on whether DBSubnetGroupName\nis specified.\n\n\nIf DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified,\nthe following applies:\n\n\n * If the default VPC in the target Region doesn’t have an internet gateway\n attached to it, the DB instance is private.\n\n\n * If the default VPC in the target Region has an internet gateway attached\n to it, the DB instance is public.\n\n\nIf DBSubnetGroupName is specified, and PubliclyAccessible isn't specified,\nthe following applies:\n\n\n * If the subnets are part of a VPC that doesn’t have an internet gateway\n attached to it, the DB instance is private.\n\n\n * If the subnets are part of a VPC that has an internet gateway attached\n to it, the DB instance is public." type: "boolean" replicaMode: - description: "The open mode of the replica database: mounted or read-only. \n This parameter is only supported for Oracle DB instances. \n Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. \n You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) in the Amazon RDS User Guide. \n For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually." + description: "The open mode of the replica database: mounted or read-only.\n\n\nThis parameter is only supported for Oracle DB instances.\n\n\nMounted DB replicas are included in Oracle Database Enterprise Edition. The\nmain use case for mounted replicas is cross-Region disaster recovery. 
The\nprimary database doesn't use Active Data Guard to transmit information to\nthe mounted replica. Because it doesn't accept user connections, a mounted\nreplica can't serve a read-only workload.\n\n\nYou can create a combination of mounted and read-only DB replicas for the\nsame primary DB instance. For more information, see Working with Oracle Read\nReplicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html)\nin the Amazon RDS User Guide.\n\n\nFor RDS Custom, you must specify this parameter and set it to mounted. The\nvalue won't be set by default. After replica creation, you can manage the\nopen mode manually." type: "string" sourceDBInstanceIdentifier: - description: "The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five. \n Constraints: \n * Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance. \n * Can't be specified if the SourceDBClusterIdentifier parameter is also specified. \n * For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses) in the Amazon RDS User Guide. \n * For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations) in the Amazon RDS User Guide. \n * The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0. \n * If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier. \n * If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas." + description: "The identifier of the DB instance that will act as the source for the read\nreplica. 
Each DB instance can have up to 15 read replicas, with the exception\nof Oracle and SQL Server, which can have up to five.\n\n\nConstraints:\n\n\n * Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL,\n or SQL Server DB instance.\n\n\n * Can't be specified if the SourceDBClusterIdentifier parameter is also\n specified.\n\n\n * For the limitations of Oracle read replicas, see Version and licensing\n considerations for RDS for Oracle replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses)\n in the Amazon RDS User Guide.\n\n\n * For the limitations of SQL Server read replicas, see Read replica limitations\n with SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations)\n in the Amazon RDS User Guide.\n\n\n * The specified DB instance must have automatic backups enabled, that\n is, its backup retention period must be greater than 0.\n\n\n * If the source DB instance is in the same Amazon Web Services Region\n as the read replica, specify a valid DB instance identifier.\n\n\n * If the source DB instance is in a different Amazon Web Services Region\n from the read replica, specify a valid DB instance ARN. For more information,\n see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing)\n in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS\n Custom, which don't support cross-Region replicas." type: "string" sourceRegion: - description: "SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN." + description: "SourceRegion is the source region where the resource exists. This is not\nsent over the wire and is only used for presigning. This value should always\nhave the same region as the source ARN." type: "string" storageEncrypted: - description: "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted. \n For RDS Custom instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error. \n Amazon Aurora \n Not applicable. The encryption for DB instances is managed by the DB cluster." + description: "A value that indicates whether the DB instance is encrypted. By default,\nit isn't encrypted.\n\n\nFor RDS Custom instances, either set this parameter to true or leave it unset.\nIf you set this parameter to false, RDS reports an error.\n\n\nAmazon Aurora\n\n\nNot applicable. The encryption for DB instances is managed by the DB cluster." type: "boolean" storageThroughput: - description: "Specifies the storage throughput value for the DB instance. \n This setting applies only to the gp3 storage type. \n This setting doesn't apply to RDS Custom or Amazon Aurora." + description: "Specifies the storage throughput value for the DB instance.\n\n\nThis setting applies only to the gp3 storage type.\n\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora." format: "int64" type: "integer" storageType: - description: "Specifies the storage type to be associated with the DB instance. \n Valid values: gp2 | gp3 | io1 | standard \n If you specify io1 or gp3, you must also include a value for the Iops parameter. 
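# --- Editorial illustration, not part of the generated diff ---
# A hedged sketch of how the read-replica fields described above could be combined in a
# DBInstance manifest for this CRD. The metadata name and the engine, dbInstanceClass,
# and dbInstanceIdentifier fields are assumptions taken from elsewhere in the same schema;
# all concrete values are placeholders.
apiVersion: rds.services.k8s.aws/v1alpha1
kind: DBInstance
metadata:
  name: orders-db-replica                  # hypothetical name
spec:
  dbInstanceIdentifier: orders-db-replica  # assumed field from the same CRD
  dbInstanceClass: db.t3.medium            # assumed field from the same CRD
  engine: mysql                            # assumed field from the same CRD
  # Same-Region source: a plain DB instance identifier is sufficient.
  # For a cross-Region source, use the source instance ARN instead.
  sourceDBInstanceIdentifier: orders-db
  publiclyAccessible: false
  multiAZ: false
  # replicaMode applies to Oracle replicas only ("mounted" or "open-read-only");
  # it is omitted here because the example engine is MySQL.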
\n Default: io1 if the Iops parameter is specified, otherwise gp2 \n Amazon Aurora \n Not applicable. Storage is managed by the DB cluster." + description: "Specifies the storage type to be associated with the DB instance.\n\n\nValid values: gp2 | gp3 | io1 | standard\n\n\nIf you specify io1 or gp3, you must also include a value for the Iops parameter.\n\n\nDefault: io1 if the Iops parameter is specified, otherwise gp2\n\n\nAmazon Aurora\n\n\nNot applicable. Storage is managed by the DB cluster." type: "string" tags: description: "Tags to assign to the DB instance." items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -285,28 +285,28 @@ spec: type: "object" type: "array" tdeCredentialARN: - description: "The ARN from the key store with which to associate the instance for TDE encryption. \n This setting doesn't apply to RDS Custom. \n Amazon Aurora \n Not applicable." + description: "The ARN from the key store with which to associate the instance for TDE encryption.\n\n\nThis setting doesn't apply to RDS Custom.\n\n\nAmazon Aurora\n\n\nNot applicable." type: "string" tdeCredentialPassword: - description: "The password for the given ARN from the key store in order to access the device. \n This setting doesn't apply to RDS Custom." + description: "The password for the given ARN from the key store in order to access the\ndevice.\n\n\nThis setting doesn't apply to RDS Custom." type: "string" timezone: - description: "The time zone of the DB instance. The time zone parameter is currently supported only by Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)." + description: "The time zone of the DB instance. The time zone parameter is currently supported\nonly by Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)." type: "string" useDefaultProcessorFeatures: - description: "A value that indicates whether the DB instance class of the DB instance uses its default processor features. \n This setting doesn't apply to RDS Custom." + description: "A value that indicates whether the DB instance class of the DB instance uses\nits default processor features.\n\n\nThis setting doesn't apply to RDS Custom." type: "boolean" vpcSecurityGroupIDs: - description: "A list of Amazon EC2 VPC security groups to associate with this DB instance. \n Amazon Aurora \n Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. \n Default: The default EC2 VPC security group for the DB subnet group's VPC." + description: "A list of Amazon EC2 VPC security groups to associate with this DB instance.\n\n\nAmazon Aurora\n\n\nNot applicable. The associated list of EC2 VPC security groups is managed\nby the DB cluster.\n\n\nDefault: The default EC2 VPC security group for the DB subnet group's VPC." 
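# --- Editorial illustration, not produced by the upstream generator ---
# A hedged sketch showing how the storage, tagging, and VPC security-group fields above
# might be set together. The iops, allocatedStorage, engine, and dbInstanceClass field
# names are assumed from the rest of this CRD; every value is a placeholder.
apiVersion: rds.services.k8s.aws/v1alpha1
kind: DBInstance
metadata:
  name: analytics-db                  # hypothetical name
spec:
  dbInstanceIdentifier: analytics-db  # assumed field from the same CRD
  engine: postgres                    # assumed field from the same CRD
  dbInstanceClass: db.m6g.large       # assumed field from the same CRD
  allocatedStorage: 400               # assumed field from the same CRD
  storageType: gp3
  iops: 12000                         # the Iops parameter is required with gp3 or io1 (see above)
  storageThroughput: 500              # gp3 only, per the description above
  vpcSecurityGroupIDs:
    - sg-0123456789abcdef0            # placeholder security group ID
  vpcSecurityGroupRefs:               # or reference another ACK-managed resource by name
    - from:
        name: analytics-db-sg         # hypothetical SecurityGroup resource
  tags:
    - key: environment
      value: staging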
items: type: "string" type: "array" vpcSecurityGroupRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -322,13 +322,13 @@ spec: description: "DBInstanceStatus defines the observed state of DBInstance" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -338,16 +338,16 @@ spec: - "region" type: "object" activityStreamEngineNativeAuditFieldsIncluded: - description: "Indicates whether engine-native audit fields are included in the database activity stream." + description: "Indicates whether engine-native audit fields are included in the database\nactivity stream." type: "boolean" activityStreamKMSKeyID: - description: "The Amazon Web Services KMS key identifier used for encrypting messages in the database activity stream. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key." 
+ description: "The Amazon Web Services KMS key identifier used for encrypting messages in\nthe database activity stream. The Amazon Web Services KMS key identifier\nis the key ARN, key ID, alias ARN, or alias name for the KMS key." type: "string" activityStreamKinesisStreamName: - description: "The name of the Amazon Kinesis data stream used for the database activity stream." + description: "The name of the Amazon Kinesis data stream used for the database activity\nstream." type: "string" activityStreamMode: - description: "The mode of the database activity stream. Database events such as a change or access generate an activity stream event. RDS for Oracle always handles these events asynchronously." + description: "The mode of the database activity stream. Database events such as a change\nor access generate an activity stream event. RDS for Oracle always handles\nthese events asynchronously." type: "string" activityStreamPolicyStatus: description: "The status of the policy state of the activity stream." @@ -356,9 +356,9 @@ spec: description: "The status of the database activity stream." type: "string" associatedRoles: - description: "The Amazon Web Services Identity and Access Management (IAM) roles associated with the DB instance." + description: "The Amazon Web Services Identity and Access Management (IAM) roles associated\nwith the DB instance." items: - description: "Describes an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB instance." + description: "Describes an Amazon Web Services Identity and Access Management (IAM) role\nthat is associated with a DB instance." properties: featureName: type: "string" @@ -373,10 +373,10 @@ spec: format: "date-time" type: "string" automationMode: - description: "The automation mode of the RDS Custom DB instance: full or all paused. If full, the DB instance automates monitoring and instance recovery. If all paused, the instance pauses automation for the duration set by --resume-full-automation-mode-minutes." + description: "The automation mode of the RDS Custom DB instance: full or all paused. If\nfull, the DB instance automates monitoring and instance recovery. If all\npaused, the instance pauses automation for the duration set by --resume-full-automation-mode-minutes." type: "string" awsBackupRecoveryPointARN: - description: "The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup." + description: "The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services\nBackup." type: "string" certificateDetails: description: "The details of the DB instance's server certificate." 
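# --- Editorial illustration, not part of the generated diff ---
# A hedged fragment of the observed state once the controller has reconciled an instance:
# the common ackResourceMetadata block plus one conditions entry. The ACK.ResourceSynced
# condition type and all values are illustrative assumptions.
status:
  ackResourceMetadata:
    arn: arn:aws:rds:eu-west-1:123456789012:db:analytics-db   # placeholder ARN
    ownerAccountID: "123456789012"
    region: eu-west-1
  conditions:
    - type: ACK.ResourceSynced        # assumed condition type commonly reported by ACK controllers
      status: "True"
      lastTransitionTime: "2024-02-16T10:07:44Z"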
@@ -388,9 +388,9 @@ spec: type: "string" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -414,28 +414,28 @@ spec: type: "object" type: "array" customerOwnedIPEnabled: - description: "Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance. \n A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. \n For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. \n For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) in the Amazon Web Services Outposts User Guide." + description: "Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS\non Outposts DB instance.\n\n\nA CoIP provides local or external connectivity to resources in your Outpost\nsubnets through your on-premises network. For some use cases, a CoIP can\nprovide lower latency for connections to the DB instance from outside of\nits virtual private cloud (VPC) on your local network.\n\n\nFor more information about RDS on Outposts, see Working with Amazon RDS on\nAmazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html)\nin the Amazon RDS User Guide.\n\n\nFor more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing)\nin the Amazon Web Services Outposts User Guide." type: "boolean" dbInstanceAutomatedBackupsReplications: description: "The list of replicated automated backups associated with the DB instance." items: - description: "Automated backups of a DB instance replicated to another Amazon Web Services Region. They consist of system backups, transaction logs, and database instance properties." + description: "Automated backups of a DB instance replicated to another Amazon Web Services\nRegion. They consist of system backups, transaction logs, and database instance\nproperties." properties: dbInstanceAutomatedBackupsARN: type: "string" type: "object" type: "array" dbInstancePort: - description: "Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port." 
+ description: "Specifies the port that the DB instance listens on. If the DB instance is\npart of a DB cluster, this can be a different port than the DB cluster port." format: "int64" type: "integer" dbInstanceStatus: - description: "Specifies the current state of this database. \n For information about DB instance statuses, see Viewing DB instance status (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) in the Amazon RDS User Guide." + description: "Specifies the current state of this database.\n\n\nFor information about DB instance statuses, see Viewing DB instance status\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status)\nin the Amazon RDS User Guide." type: "string" dbParameterGroups: description: "Provides the list of DB parameter groups applied to this DB instance." items: - description: "The status of the DB parameter group. \n This data type is used as a response element in the following actions: \n * CreateDBInstance \n * CreateDBInstanceReadReplica \n * DeleteDBInstance \n * ModifyDBInstance \n * RebootDBInstance \n * RestoreDBInstanceFromDBSnapshot" + description: "The status of the DB parameter group.\n\n\nThis data type is used as a response element in the following actions:\n\n\n * CreateDBInstance\n\n\n * CreateDBInstanceReadReplica\n\n\n * DeleteDBInstance\n\n\n * ModifyDBInstance\n\n\n * RebootDBInstance\n\n\n * RestoreDBInstanceFromDBSnapshot" properties: dbParameterGroupName: type: "string" @@ -444,7 +444,7 @@ spec: type: "object" type: "array" dbSubnetGroup: - description: "Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group." + description: "Specifies information on the subnet group associated with the DB instance,\nincluding the name, description, and subnets in the subnet group." properties: dbSubnetGroupARN: type: "string" @@ -456,10 +456,10 @@ spec: type: "string" subnets: items: - description: "This data type is used as a response element for the DescribeDBSubnetGroups operation." + description: "This data type is used as a response element for the DescribeDBSubnetGroups\noperation." properties: subnetAvailabilityZone: - description: "Contains Availability Zone information. \n This data type is used as an element in the OrderableDBInstanceOption data type." + description: "Contains Availability Zone information.\n\n\nThis data type is used as an element in the OrderableDBInstanceOption data\ntype." properties: name: type: "string" @@ -467,7 +467,7 @@ spec: subnetIdentifier: type: "string" subnetOutpost: - description: "A data type that represents an Outpost. \n For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide." + description: "A data type that represents an Outpost.\n\n\nFor more information about RDS on Outposts, see Amazon RDS on Amazon Web\nServices Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html)\nin the Amazon RDS User Guide." properties: arn: type: "string" @@ -484,15 +484,15 @@ spec: type: "string" type: "object" dbSystemID: - description: "The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is valid for RDS Custom only." 
+ description: "The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle\nSID is also the name of the CDB. This setting is valid for RDS Custom only." type: "string" dbiResourceID: - description: "The Amazon Web Services Region-unique, immutable identifier for the DB instance. This identifier is found in Amazon Web Services CloudTrail log entries whenever the Amazon Web Services KMS key for the DB instance is accessed." + description: "The Amazon Web Services Region-unique, immutable identifier for the DB instance.\nThis identifier is found in Amazon Web Services CloudTrail log entries whenever\nthe Amazon Web Services KMS key for the DB instance is accessed." type: "string" domainMemberships: description: "The Active Directory Domain membership records associated with the DB instance." items: - description: "An Active Directory Domain membership record associated with the DB instance or cluster." + description: "An Active Directory Domain membership record associated with the DB instance\nor cluster." properties: domain: type: "string" @@ -505,12 +505,12 @@ spec: type: "object" type: "array" enabledCloudwatchLogsExports: - description: "A list of log types that this DB instance is configured to export to CloudWatch Logs. \n Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) in the Amazon RDS User Guide." + description: "A list of log types that this DB instance is configured to export to CloudWatch\nLogs.\n\n\nLog types vary by DB engine. For information about the log types for each\nDB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html)\nin the Amazon RDS User Guide." items: type: "string" type: "array" endpoint: - description: "Specifies the connection endpoint. \n The endpoint might not be shown for instances whose status is creating." + description: "Specifies the connection endpoint.\n\n\nThe endpoint might not be shown for instances whose status is creating." properties: address: type: "string" @@ -521,17 +521,17 @@ spec: type: "integer" type: "object" enhancedMonitoringResourceARN: - description: "The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance." + description: "The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that\nreceives the Enhanced Monitoring metrics data for the DB instance." type: "string" iamDatabaseAuthenticationEnabled: - description: "True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. \n IAM database authentication can be enabled for the following database engines \n * For MySQL 5.6, minor version 5.6.34 or higher \n * For MySQL 5.7, minor version 5.7.16 or higher \n * Aurora 5.6 or higher. To enable IAM database authentication for Aurora, see DBCluster Type." + description: "True if mapping of Amazon Web Services Identity and Access Management (IAM)\naccounts to database accounts is enabled, and otherwise false.\n\n\nIAM database authentication can be enabled for the following database engines\n\n\n * For MySQL 5.6, minor version 5.6.34 or higher\n\n\n * For MySQL 5.7, minor version 5.7.16 or higher\n\n\n * Aurora 5.6 or higher. To enable IAM database authentication for Aurora,\n see DBCluster Type." 
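# --- Editorial illustration, values are placeholders ---
# A hedged fragment of what an available instance might report for the endpoint, IAM
# authentication, and managed master-user-secret status fields described in this hunk
# and the one that follows.
status:
  dbInstanceStatus: available
  iamDatabaseAuthenticationEnabled: false
  endpoint:
    address: analytics-db.abcdefghij12.eu-west-1.rds.amazonaws.com   # placeholder DNS name
    port: 5432
  masterUserSecret:
    kmsKeyID: arn:aws:kms:eu-west-1:123456789012:key/00000000-0000-0000-0000-000000000000   # placeholder
  latestRestorableTime: "2024-02-16T09:55:00Z"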
type: "boolean" instanceCreateTime: description: "Provides the date and time the DB instance was created." format: "date-time" type: "string" latestRestorableTime: - description: "Specifies the latest time to which a database can be restored with point-in-time restore." + description: "Specifies the latest time to which a database can be restored with point-in-time\nrestore." format: "date-time" type: "string" listenerEndpoint: @@ -546,7 +546,7 @@ spec: type: "integer" type: "object" masterUserSecret: - description: "Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. \n For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide." + description: "Contains the secret managed by RDS in Amazon Web Services Secrets Manager\nfor the master user password.\n\n\nFor more information, see Password management with Amazon Web Services Secrets\nManager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html)\nin the Amazon RDS User Guide." properties: kmsKeyID: type: "string" @@ -567,7 +567,7 @@ spec: type: "object" type: "array" pendingModifiedValues: - description: "A value that specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements." + description: "A value that specifies that changes to the DB instance are pending. This\nelement is only included when changes are pending. Specific changes are identified\nby subelements." properties: allocatedStorage: format: "int64" @@ -599,7 +599,7 @@ spec: multiAZ: type: "boolean" pendingCloudwatchLogsExports: - description: "A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated." + description: "A list of the log types whose configuration is still pending. In other words,\nthese log types are in the process of being activated or deactivated." properties: logTypesToDisable: items: @@ -615,7 +615,7 @@ spec: type: "integer" processorFeatures: items: - description: "Contains the processor features of a DB instance class. \n To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter. \n You can set the processor features of the DB instance class for a DB instance when you call one of the following actions: \n * CreateDBInstance \n * ModifyDBInstance \n * RestoreDBInstanceFromDBSnapshot \n * RestoreDBInstanceFromS3 \n * RestoreDBInstanceToPointInTime \n You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter. \n In addition, you can use the following actions for DB instance class processor information: \n * DescribeDBInstances \n * DescribeDBSnapshots \n * DescribeValidDBInstanceModifications \n If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met: \n * You are accessing an Oracle DB instance. \n * Your Oracle DB instance class supports configuring the number of CPU cores and threads per core. \n * The current number CPU cores and threads is set to a non-default value. 
\n For more information, see Configuring the Processor of the DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) in the Amazon RDS User Guide." + description: "Contains the processor features of a DB instance class.\n\n\nTo specify the number of CPU cores, use the coreCount feature name for the\nName parameter. To specify the number of threads per core, use the threadsPerCore\nfeature name for the Name parameter.\n\n\nYou can set the processor features of the DB instance class for a DB instance\nwhen you call one of the following actions:\n\n\n * CreateDBInstance\n\n\n * ModifyDBInstance\n\n\n * RestoreDBInstanceFromDBSnapshot\n\n\n * RestoreDBInstanceFromS3\n\n\n * RestoreDBInstanceToPointInTime\n\n\nYou can view the valid processor values for a particular instance class by\ncalling the DescribeOrderableDBInstanceOptions action and specifying the\ninstance class for the DBInstanceClass parameter.\n\n\nIn addition, you can use the following actions for DB instance class processor\ninformation:\n\n\n * DescribeDBInstances\n\n\n * DescribeDBSnapshots\n\n\n * DescribeValidDBInstanceModifications\n\n\nIf you call DescribeDBInstances, ProcessorFeature returns non-null values\nonly if the following conditions are met:\n\n\n * You are accessing an Oracle DB instance.\n\n\n * Your Oracle DB instance class supports configuring the number of CPU\n cores and threads per core.\n\n\n * The current number CPU cores and threads is set to a non-default value.\n\n\nFor more information, see Configuring the Processor of the DB Instance Class\n(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor)\nin the Amazon RDS User Guide." properties: name: type: "string" @@ -633,30 +633,30 @@ spec: type: "string" type: "object" readReplicaDBClusterIdentifiers: - description: "Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output doesn't contain information about cross-Region Aurora read replicas. \n Currently, each RDS DB instance can have only one Aurora read replica." + description: "Contains one or more identifiers of Aurora DB clusters to which the RDS DB\ninstance is replicated as a read replica. For example, when you create an\nAurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB\ncluster for the Aurora read replica is shown. This output doesn't contain\ninformation about cross-Region Aurora read replicas.\n\n\nCurrently, each RDS DB instance can have only one Aurora read replica." items: type: "string" type: "array" readReplicaDBInstanceIdentifiers: - description: "Contains one or more identifiers of the read replicas associated with this DB instance." + description: "Contains one or more identifiers of the read replicas associated with this\nDB instance." items: type: "string" type: "array" readReplicaSourceDBClusterIdentifier: - description: "Contains the identifier of the source DB cluster if this DB instance is a read replica." + description: "Contains the identifier of the source DB cluster if this DB instance is a\nread replica." type: "string" readReplicaSourceDBInstanceIdentifier: - description: "Contains the identifier of the source DB instance if this DB instance is a read replica." 
+ description: "Contains the identifier of the source DB instance if this DB instance is\na read replica." type: "string" resumeFullAutomationModeTime: - description: "The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60 (default). The maximum value is 1,440." + description: "The number of minutes to pause the automation. When the time period ends,\nRDS Custom resumes full automation. The minimum value is 60 (default). The\nmaximum value is 1,440." format: "date-time" type: "string" secondaryAvailabilityZone: - description: "If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support." + description: "If present, specifies the name of the secondary Availability Zone for a DB\ninstance with multi-AZ support." type: "string" statusInfos: - description: "The status of a read replica. If the instance isn't a read replica, this is blank." + description: "The status of a read replica. If the instance isn't a read replica, this\nis blank." items: description: "Provides a list of status information for a DB instance." properties: @@ -671,9 +671,9 @@ spec: type: "object" type: "array" vpcSecurityGroups: - description: "Provides a list of VPC security group elements that the DB instance belongs to." + description: "Provides a list of VPC security group elements that the DB instance belongs\nto." items: - description: "This data type is used as a response element for queries on VPC security group membership." + description: "This data type is used as a response element for queries on VPC security\ngroup membership." properties: status: type: "string" diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbparametergroups.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbparametergroups.yaml index 0936e36f8..580b37f80 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbparametergroups.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbparametergroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbparametergroups.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,24 +19,24 @@ spec: description: "DBParameterGroup is the Schema for the DBParameterGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBParameterGroupSpec defines the desired state of DBParameterGroup. \n Contains the details of an Amazon RDS DB parameter group. \n This data type is used as a response element in the DescribeDBParameterGroups action." + description: "DBParameterGroupSpec defines the desired state of DBParameterGroup.\n\n\nContains the details of an Amazon RDS DB parameter group.\n\n\nThis data type is used as a response element in the DescribeDBParameterGroups\naction." properties: description: description: "The description for the DB parameter group." type: "string" family: - description: "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family. \n To list all of the available parameter group families for a DB engine, use the following command: \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine \n For example, to list all of the available parameter group families for the MySQL DB engine, use the following command: \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql \n The output contains duplicates. \n The following are the valid DB engine values: \n * aurora (for MySQL 5.6-compatible Aurora) \n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) \n * aurora-postgresql \n * mariadb \n * mysql \n * oracle-ee \n * oracle-ee-cdb \n * oracle-se2 \n * oracle-se2-cdb \n * postgres \n * sqlserver-ee \n * sqlserver-se \n * sqlserver-ex \n * sqlserver-web" + description: "The DB parameter group family name. A DB parameter group can be associated\nwith one and only one DB parameter group family, and can be applied only\nto a DB instance running a database engine and engine version compatible\nwith that DB parameter group family.\n\n\nTo list all of the available parameter group families for a DB engine, use\nthe following command:\n\n\naws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"\n--engine \n\n\nFor example, to list all of the available parameter group families for the\nMySQL DB engine, use the following command:\n\n\naws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"\n--engine mysql\n\n\nThe output contains duplicates.\n\n\nThe following are the valid DB engine values:\n\n\n * aurora (for MySQL 5.6-compatible Aurora)\n\n\n * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)\n\n\n * aurora-postgresql\n\n\n * mariadb\n\n\n * mysql\n\n\n * oracle-ee\n\n\n * oracle-ee-cdb\n\n\n * oracle-se2\n\n\n * oracle-se2-cdb\n\n\n * postgres\n\n\n * sqlserver-ee\n\n\n * sqlserver-se\n\n\n * sqlserver-ex\n\n\n * sqlserver-web" type: "string" name: - description: "The name of the DB parameter group. \n Constraints: \n * Must be 1 to 255 letters, numbers, or hyphens. 
\n * First character must be a letter \n * Can't end with a hyphen or contain two consecutive hyphens \n This value is stored as a lowercase string." + description: "The name of the DB parameter group.\n\n\nConstraints:\n\n\n * Must be 1 to 255 letters, numbers, or hyphens.\n\n\n * First character must be a letter\n\n\n * Can't end with a hyphen or contain two consecutive hyphens\n\n\nThis value is stored as a lowercase string." type: "string" parameterOverrides: additionalProperties: @@ -45,7 +45,7 @@ spec: tags: description: "Tags to assign to the DB parameter group." items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -62,13 +62,13 @@ spec: description: "DBParameterGroupStatus defines the observed state of DBParameterGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
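For orientation, a minimal DBParameterGroup manifest that uses only the spec fields visible in this schema (name, description, family, parameterOverrides, tags) might look like the sketch below; the family value and the overridden parameter are illustrative placeholders rather than values taken from this patch, and parameterOverrides is assumed to be a plain string-to-string map.

apiVersion: rds.services.k8s.aws/v1alpha1
kind: DBParameterGroup
metadata:
  name: example-pg
spec:
  name: example-pg                  # 1 to 255 letters, numbers, or hyphens; stored as a lowercase string
  description: "Example parameter group managed by the ACK RDS controller"
  family: postgres15                # assumed family; list valid families with the describe-db-engine-versions command quoted above
  parameterOverrides:
    max_connections: "200"          # assumed engine parameter, shown only to illustrate the key/value map form
  tags:
    - key: team
      value: data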
@@ -78,9 +78,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -106,7 +106,7 @@ spec: parameterOverrideStatuses: description: "A list of Parameter values." items: - description: "This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. \n This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions." + description: "This data type is used as a request parameter in the ModifyDBParameterGroup\nand ResetDBParameterGroup actions.\n\n\nThis data type is used as a response element in the DescribeEngineDefaultParameters\nand DescribeDBParameters actions." properties: allowedValues: type: "string" diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbproxies.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbproxies.yaml index 7b8439424..fd7c45a09 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbproxies.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbproxies.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbproxies.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,20 +19,20 @@ spec: description: "DBProxy is the Schema for the DBProxies API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBProxySpec defines the desired state of DBProxy. \n The data structure representing a proxy managed by the RDS Proxy. \n This data type is used as a response element in the DescribeDBProxies action." + description: "DBProxySpec defines the desired state of DBProxy.\n\n\nThe data structure representing a proxy managed by the RDS Proxy.\n\n\nThis data type is used as a response element in the DescribeDBProxies action." properties: auth: description: "The authorization mechanism that the proxy uses." items: - description: "Specifies the details of authentication used by a proxy to log in as a specific database user." + description: "Specifies the details of authentication used by a proxy to log in as a specific\ndatabase user." properties: authScheme: type: "string" @@ -49,28 +49,28 @@ spec: type: "object" type: "array" debugLogging: - description: "Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs." + description: "Whether the proxy includes detailed information about SQL statements in its\nlogs. This information helps you to debug issues involving SQL behavior or\nthe performance and scalability of the proxy connections. The debug information\nincludes the text of SQL statements that you submit through the proxy. Thus,\nonly enable this setting when needed for debugging, and only when you have\nsecurity measures in place to safeguard any sensitive information that appears\nin the logs." type: "boolean" engineFamily: - description: "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL. For RDS for Microsoft SQL Server, specify SQLSERVER." + description: "The kinds of databases that the proxy can connect to. This value determines\nwhich database network protocol the proxy recognizes when it interprets network\ntraffic to and from the database. For Aurora MySQL, RDS for MariaDB, and\nRDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for\nPostgreSQL databases, specify POSTGRESQL. For RDS for Microsoft SQL Server,\nspecify SQLSERVER." type: "string" idleClientTimeout: - description: "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database." + description: "The number of seconds that a connection to the proxy can be inactive before\nthe proxy disconnects it. 
You can set this value higher or lower than the\nconnection timeout limit for the associated database." format: "int64" type: "integer" name: - description: "The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens." + description: "The identifier for the proxy. This name must be unique for all proxies owned\nby your Amazon Web Services account in the specified Amazon Web Services\nRegion. An identifier must begin with a letter and must contain only ASCII\nletters, digits, and hyphens; it can't end with a hyphen or contain two consecutive\nhyphens." type: "string" requireTLS: - description: "A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy." + description: "A Boolean parameter that specifies whether Transport Layer Security (TLS)\nencryption is required for connections to the proxy. By enabling this setting,\nyou can enforce encrypted TLS connections to the proxy." type: "boolean" roleARN: - description: "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in Amazon Web Services Secrets Manager." + description: "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access\nsecrets in Amazon Web Services Secrets Manager." type: "string" tags: - description: "An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy." + description: "An optional set of key-value pairs to associate arbitrary data of your choosing\nwith the proxy." items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -99,13 +99,13 @@ spec: description: "DBProxyStatus defines the observed state of DBProxy" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -115,9 +115,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -145,10 +145,10 @@ spec: format: "date-time" type: "string" endpoint: - description: "The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application." + description: "The endpoint that you can use to connect to the DB proxy. You include the\nendpoint value in the connection string for a database client application." type: "string" status: - description: "The current status of this proxy. A status of available means the proxy is ready to handle requests. Other values indicate that you must wait for the proxy to be ready, or take some action to resolve an issue." + description: "The current status of this proxy. A status of available means the proxy is\nready to handle requests. Other values indicate that you must wait for the\nproxy to be ready, or take some action to resolve an issue." type: "string" updatedDate: description: "The date and time when the proxy was last updated." 
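Pulling the DBProxy spec fields above into one place, a rough manifest restricted to the keys shown in this schema (name, engineFamily, auth, roleARN, requireTLS, debugLogging, idleClientTimeout, tags) could look like the sketch below; the ARNs are placeholders, the secretARN key inside auth is assumed rather than shown in these hunks, and required networking settings such as the proxy's subnets are deliberately elided.

apiVersion: rds.services.k8s.aws/v1alpha1
kind: DBProxy
metadata:
  name: example-proxy
spec:
  name: example-proxy                 # must begin with a letter; ASCII letters, digits, and hyphens only
  engineFamily: POSTGRESQL            # MYSQL, POSTGRESQL, or SQLSERVER per the description above
  roleARN: "arn:aws:iam::111122223333:role/example-proxy-role"   # placeholder IAM role used to read Secrets Manager secrets
  requireTLS: true
  debugLogging: false                 # only enable while actively debugging, as the description warns
  idleClientTimeout: 1800             # seconds of client inactivity before the proxy disconnects
  auth:
    - authScheme: SECRETS
      secretARN: "arn:aws:secretsmanager:us-east-1:111122223333:secret:example"   # key name assumed, not shown above
  tags:
    - key: team
      value: data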
diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbsubnetgroups.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbsubnetgroups.yaml index 5f3ffa241..1939bbddd 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbsubnetgroups.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/dbsubnetgroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dbsubnetgroups.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,21 +19,21 @@ spec: description: "DBSubnetGroup is the Schema for the DBSubnetGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "DBSubnetGroupSpec defines the desired state of DBSubnetGroup. \n Contains the details of an Amazon RDS DB subnet group. \n This data type is used as a response element in the DescribeDBSubnetGroups action." + description: "DBSubnetGroupSpec defines the desired state of DBSubnetGroup.\n\n\nContains the details of an Amazon RDS DB subnet group.\n\n\nThis data type is used as a response element in the DescribeDBSubnetGroups\naction." properties: description: description: "The description for the DB subnet group." type: "string" name: - description: "The name for the DB subnet group. This value is stored as a lowercase string. \n Constraints: \n * Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. \n * Must not be default. \n * First character must be a letter. \n Example: mydbsubnetgroup" + description: "The name for the DB subnet group. This value is stored as a lowercase string.\n\n\nConstraints:\n\n\n * Must contain no more than 255 letters, numbers, periods, underscores,\n spaces, or hyphens.\n\n\n * Must not be default.\n\n\n * First character must be a letter.\n\n\nExample: mydbsubnetgroup" type: "string" subnetIDs: description: "The EC2 Subnet IDs for the DB subnet group." 
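As a quick illustration of the spec shape shown so far (name, description, subnetIDs), a minimal DBSubnetGroup could be declared as in the sketch below; the subnet IDs are placeholders, and the optional subnetRefs and tags fields that follow in this schema are omitted.

apiVersion: rds.services.k8s.aws/v1alpha1
kind: DBSubnetGroup
metadata:
  name: example-subnet-group
spec:
  name: example-subnet-group          # stored as a lowercase string; must not be "default"
  description: "Subnets for the example DB instances"
  subnetIDs:                          # placeholder EC2 subnet IDs spanning at least two Availability Zones
    - subnet-0123456789abcdef0
    - subnet-0fedcba9876543210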
@@ -42,10 +42,10 @@ spec: type: "array" subnetRefs: items: - description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: \n from: name: my-api" + description: "AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference\ntype to provide more user friendly syntax for references using 'from' field\nEx:\nAPIIDRef:\n\n\n\tfrom:\n\t name: my-api" properties: from: - description: "AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)" + description: "AWSResourceReference provides all the values necessary to reference another\nk8s resource for finding the identifier(Id/ARN/Name)" properties: name: type: "string" @@ -55,7 +55,7 @@ spec: tags: description: "Tags to assign to the DB subnet group." items: - description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair. \n For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide." + description: "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\n\nFor more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html)\nin the Amazon RDS User Guide." properties: key: type: "string" @@ -71,13 +71,13 @@ spec: description: "DBSubnetGroupStatus defines the observed state of DBSubnetGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -87,9 +87,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -118,10 +118,10 @@ spec: subnets: description: "Contains a list of Subnet elements." items: - description: "This data type is used as a response element for the DescribeDBSubnetGroups operation." + description: "This data type is used as a response element for the DescribeDBSubnetGroups\noperation." properties: subnetAvailabilityZone: - description: "Contains Availability Zone information. \n This data type is used as an element in the OrderableDBInstanceOption data type." + description: "Contains Availability Zone information.\n\n\nThis data type is used as an element in the OrderableDBInstanceOption data\ntype." properties: name: type: "string" @@ -129,7 +129,7 @@ spec: subnetIdentifier: type: "string" subnetOutpost: - description: "A data type that represents an Outpost. \n For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide." + description: "A data type that represents an Outpost.\n\n\nFor more information about RDS on Outposts, see Amazon RDS on Amazon Web\nServices Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html)\nin the Amazon RDS User Guide." properties: arn: type: "string" @@ -139,7 +139,7 @@ spec: type: "object" type: "array" supportedNetworkTypes: - description: "The network type of the DB subnet group. \n Valid values: \n * IPV4 \n * DUAL \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). \n For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon RDS User Guide." + description: "The network type of the DB subnet group.\n\n\nValid values:\n\n\n * IPV4\n\n\n * DUAL\n\n\nA DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6\nprotocols (DUAL).\n\n\nFor more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html)\nin the Amazon RDS User Guide." 
items: type: "string" type: "array" diff --git a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/globalclusters.yaml b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/globalclusters.yaml index fce5ba80b..4244cb27d 100644 --- a/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/globalclusters.yaml +++ b/crd-catalog/aws-controllers-k8s/rds-controller/rds.services.k8s.aws/v1alpha1/globalclusters.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "globalclusters.rds.services.k8s.aws" spec: group: "rds.services.k8s.aws" @@ -19,21 +19,21 @@ spec: description: "GlobalCluster is the Schema for the GlobalClusters API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "GlobalClusterSpec defines the desired state of GlobalCluster. \n A data type representing an Aurora global database." + description: "GlobalClusterSpec defines the desired state of GlobalCluster.\n\n\nA data type representing an Aurora global database." properties: databaseName: - description: "The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating." + description: "The name for your database of up to 64 alphanumeric characters. If you do\nnot provide a name, Amazon Aurora will not create a database in the global\ndatabase cluster you are creating." type: "string" deletionProtection: - description: "The deletion protection setting for the new global database. The global database can't be deleted when deletion protection is enabled." + description: "The deletion protection setting for the new global database. The global database\ncan't be deleted when deletion protection is enabled." type: "boolean" engine: description: "The name of the database engine to be used for this DB cluster." @@ -45,7 +45,7 @@ spec: description: "The cluster identifier of the new global database cluster." 
type: "string" sourceDBClusterIdentifier: - description: "The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional." + description: "The Amazon Resource Name (ARN) to use as the primary cluster of the global\ndatabase. This parameter is optional." type: "string" storageEncrypted: description: "The storage encryption setting for the new global database cluster." @@ -55,13 +55,13 @@ spec: description: "GlobalClusterStatus defines the observed state of GlobalCluster" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -71,9 +71,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
@@ -97,7 +97,7 @@ spec: type: "object" type: "array" failoverState: - description: "A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster)." + description: "A data object containing all properties for the current state of an in-process\nor pending failover process for this Aurora global database. This object\nis empty unless the FailoverGlobalCluster API operation has been called on\nthis Aurora global database (GlobalCluster)." properties: fromDBClusterARN: type: "string" @@ -109,7 +109,7 @@ spec: globalClusterMembers: description: "The list of primary and secondary clusters within the global database cluster." items: - description: "A data structure with information about any primary and secondary clusters associated with an Aurora global database." + description: "A data structure with information about any primary and secondary clusters\nassociated with an Aurora global database." properties: dbClusterARN: type: "string" @@ -124,7 +124,7 @@ spec: type: "object" type: "array" globalClusterResourceID: - description: "The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in Amazon Web Services CloudTrail log entries whenever the Amazon Web Services KMS key for the DB cluster is accessed." + description: "The Amazon Web Services Region-unique, immutable identifier for the global\ndatabase cluster. This identifier is found in Amazon Web Services CloudTrail\nlog entries whenever the Amazon Web Services KMS key for the DB cluster is\naccessed." type: "string" status: description: "Specifies the current state of this global database cluster." diff --git a/crd-catalog/aws-controllers-k8s/s3-controller/s3.services.k8s.aws/v1alpha1/buckets.yaml b/crd-catalog/aws-controllers-k8s/s3-controller/s3.services.k8s.aws/v1alpha1/buckets.yaml index 7fc8c8bc9..6c5862f2d 100644 --- a/crd-catalog/aws-controllers-k8s/s3-controller/s3.services.k8s.aws/v1alpha1/buckets.yaml +++ b/crd-catalog/aws-controllers-k8s/s3-controller/s3.services.k8s.aws/v1alpha1/buckets.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "buckets.s3.services.k8s.aws" spec: group: "s3.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "Bucket is the Schema for the Buckets API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "BucketSpec defines the desired state of Bucket. \n In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all Amazon Web Services accounts." + description: "BucketSpec defines the desired state of Bucket.\n\n\nIn terms of implementation, a Bucket is a resource. An Amazon S3 bucket name\nis globally unique, and the namespace is shared by all Amazon Web Services\naccounts." properties: accelerate: description: "Container for setting the transfer acceleration state." @@ -40,13 +40,13 @@ spec: type: "string" analytics: items: - description: "Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket." + description: "Specifies the configuration and any analyses for the analytics filter of\nan Amazon S3 bucket." properties: filter: - description: "The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis." + description: "The filter used to describe a set of objects for analyses. A filter must\nhave exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).\nIf no filter is provided, all objects will be considered in any analysis." properties: and: - description: "A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates in any combination, and an object must match all of the predicates for the filter to apply." + description: "A conjunction (logical AND) of predicates, which is used in evaluating a\nmetrics filter. The operator must have at least two predicates in any combination,\nand an object must match all of the predicates for the filter to apply." properties: prefix: type: "string" @@ -75,10 +75,10 @@ spec: id: type: "string" storageClassAnalysis: - description: "Specifies data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes for an Amazon S3 bucket." + description: "Specifies data related to access patterns to be collected and made available\nto analyze the tradeoffs between different storage classes for an Amazon\nS3 bucket." properties: dataExport: - description: "Container for data related to the storage class analysis for an Amazon S3 bucket for export." + description: "Container for data related to the storage class analysis for an Amazon S3\nbucket for export." properties: destination: description: "Where to publish the analytics results." @@ -103,7 +103,7 @@ spec: type: "object" type: "array" cors: - description: "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 User Guide." + description: "Describes the cross-origin access configuration for objects in an Amazon\nS3 bucket. 
For more information, see Enabling Cross-Origin Resource Sharing\n(https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon\nS3 User Guide." properties: corsRules: items: @@ -147,7 +147,7 @@ spec: description: "Specifies the default server-side encryption configuration." properties: applyServerSideEncryptionByDefault: - description: "Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) in the Amazon S3 API Reference." + description: "Describes the default server-side encryption to apply to new objects in the\nbucket. If a PUT Object request doesn't specify any server-side encryption,\nthis default encryption will be applied. If you don't specify a customer\nmanaged key at configuration, Amazon S3 automatically creates an Amazon Web\nServices KMS key in your Amazon Web Services account the first time that\nyou add an object encrypted with SSE-KMS to a bucket. By default, Amazon\nS3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption\n(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)\nin the Amazon S3 API Reference." properties: kmsMasterKeyID: type: "string" @@ -160,7 +160,7 @@ spec: type: "array" type: "object" grantFullControl: - description: "Allows grantee the read, write, read ACP, and write ACP permissions on the bucket." + description: "Allows grantee the read, write, read ACP, and write ACP permissions on the\nbucket." type: "string" grantRead: description: "Allows grantee to list the objects in the bucket." @@ -169,20 +169,20 @@ spec: description: "Allows grantee to read the bucket ACL." type: "string" grantWrite: - description: "Allows grantee to create new objects in the bucket. \n For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects." + description: "Allows grantee to create new objects in the bucket.\n\n\nFor the bucket and object owners of existing objects, also allows deletions\nand overwrites of those objects." type: "string" grantWriteACP: description: "Allows grantee to write the ACL for the applicable bucket." type: "string" intelligentTiering: items: - description: "Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. \n For information about the S3 Intelligent-Tiering storage class, see Storage class for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)." + description: "Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.\n\n\nFor information about the S3 Intelligent-Tiering storage class, see Storage\nclass for automatically optimizing frequently and infrequently accessed objects\n(https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)." properties: filter: - description: "The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to." 
+ description: "The Filter is used to identify objects that the S3 Intelligent-Tiering configuration\napplies to." properties: and: - description: "A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies." + description: "A container for specifying S3 Intelligent-Tiering filters. The filters determine\nthe subset of objects to which the rule applies." properties: prefix: type: "string" @@ -214,7 +214,7 @@ spec: type: "string" tierings: items: - description: "The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead." + description: "The S3 Intelligent-Tiering storage class is designed to optimize storage\ncosts by automatically moving data to the most cost-effective storage access\ntier, without additional operational overhead." properties: accessTier: type: "string" @@ -227,20 +227,20 @@ spec: type: "array" inventory: items: - description: "Specifies the inventory configuration for an Amazon S3 bucket. For more information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the Amazon S3 API Reference." + description: "Specifies the inventory configuration for an Amazon S3 bucket. For more information,\nsee GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)\nin the Amazon S3 API Reference." properties: destination: description: "Specifies the inventory configuration for an Amazon S3 bucket." properties: s3BucketDestination: - description: "Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published." + description: "Contains the bucket name, file format, bucket owner (optional), and prefix\n(optional) where inventory results are published." properties: accountID: type: "string" bucket: type: "string" encryption: - description: "Contains the type of server-side encryption used to encrypt the inventory results." + description: "Contains the type of server-side encryption used to encrypt the inventory\nresults." properties: sseKMS: description: "Specifies the use of SSE-KMS to encrypt delivered inventory reports." @@ -256,7 +256,7 @@ spec: type: "object" type: "object" filter: - description: "Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria." + description: "Specifies an inventory filter. The inventory only includes objects that meet\nthe filter's criteria." properties: prefix: type: "string" @@ -287,7 +287,7 @@ spec: description: "A lifecycle rule for individual objects in an Amazon S3 bucket." properties: abortIncompleteMultipartUpload: - description: "Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) in the Amazon S3 User Guide." 
+ description: "Specifies the days since the initiation of an incomplete multipart upload\nthat Amazon S3 will wait before permanently removing all parts of the upload.\nFor more information, see Aborting Incomplete Multipart Uploads Using a Bucket\nLifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)\nin the Amazon S3 User Guide." properties: daysAfterInitiation: format: "int64" @@ -306,10 +306,10 @@ spec: type: "boolean" type: "object" filter: - description: "The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified." + description: "The Filter is used to identify objects that a Lifecycle Rule applies to.\nA Filter must have exactly one of Prefix, Tag, or And specified." properties: and: - description: "This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator." + description: "This is used in a Lifecycle Rule Filter to apply a logical AND to two or\nmore predicates. The Lifecycle Rule will apply to any object matching all\nof the predicates configured inside the And operator." properties: objectSizeGreaterThan: format: "int64" @@ -350,7 +350,7 @@ spec: id: type: "string" noncurrentVersionExpiration: - description: "Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime." + description: "Specifies when noncurrent object versions expire. Upon expiration, Amazon\nS3 permanently deletes the noncurrent object versions. You set this lifecycle\nconfiguration action on a bucket that has versioning enabled (or suspended)\nto request that Amazon S3 delete noncurrent object versions at a specific\nperiod in the object's lifetime." properties: newerNoncurrentVersions: format: "int64" @@ -361,7 +361,7 @@ spec: type: "object" noncurrentVersionTransitions: items: - description: "Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime." + description: "Container for the transition rule that describes when noncurrent objects\ntransition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR,\nGLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled\n(or versioning is suspended), you can set this action to request that Amazon\nS3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA,\nINTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at\na specific period in the object's lifetime." properties: newerNoncurrentVersions: format: "int64" @@ -379,7 +379,7 @@ spec: type: "string" transitions: items: - description: "Specifies when an object transitions to a specified storage class. 
For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) in the Amazon S3 User Guide." + description: "Specifies when an object transitions to a specified storage class. For more\ninformation about Amazon S3 lifecycle configuration rules, see Transitioning\nObjects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)\nin the Amazon S3 User Guide." properties: date: format: "date-time" @@ -398,13 +398,13 @@ spec: description: "Container for logging status information." properties: loggingEnabled: - description: "Describes where logs are stored and the prefix that Amazon S3 assigns to all log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in the Amazon S3 API Reference." + description: "Describes where logs are stored and the prefix that Amazon S3 assigns to\nall log object keys for a bucket. For more information, see PUT Bucket logging\n(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)\nin the Amazon S3 API Reference." properties: targetBucket: type: "string" targetGrants: items: - description: "Container for granting information. \n Buckets that use the bucket owner enforced setting for Object Ownership don't support target grants. For more information, see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) in the Amazon S3 User Guide." + description: "Container for granting information.\n\n\nBuckets that use the bucket owner enforced setting for Object Ownership don't\nsupport target grants. For more information, see Permissions server access\nlog delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)\nin the Amazon S3 User Guide." properties: grantee: description: "Container for the person being granted permissions." @@ -430,15 +430,15 @@ spec: type: "object" metrics: items: - description: "Specifies a metrics configuration for the CloudWatch request metrics (specified by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)." + description: "Specifies a metrics configuration for the CloudWatch request metrics (specified\nby the metrics configuration ID) from an Amazon S3 bucket. If you're updating\nan existing metrics configuration, note that this is a full replacement of\nthe existing metrics configuration. If you don't include the elements you\nwant to keep, they are erased. For more information, see PutBucketMetricsConfiguration\n(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)." properties: filter: - description: "Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). 
For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)." + description: "Specifies a metrics configuration filter. The metrics configuration only\nincludes objects that meet the filter's criteria. A filter must be a prefix,\nan object tag, an access point ARN, or a conjunction (MetricsAndOperator).\nFor more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)." properties: accessPointARN: type: "string" and: - description: "A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply." + description: "A conjunction (logical AND) of predicates, which is used in evaluating a\nmetrics filter. The operator must have at least two predicates, and an object\nmust match all of the predicates in order for the filter to apply." properties: accessPointARN: type: "string" @@ -474,7 +474,7 @@ spec: description: "The name of the bucket to create." type: "string" notification: - description: "A container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off for the bucket." + description: "A container for specifying the notification configuration of the bucket.\nIf this element is empty, notifications are turned off for the bucket." properties: lambdaFunctionConfigurations: items: @@ -485,15 +485,15 @@ spec: type: "string" type: "array" filter: - description: "Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide." + description: "Specifies object key name filtering rules. For information about key name\nfiltering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)\nin the Amazon S3 User Guide." properties: key: description: "A container for object key name prefix and suffix filtering rules." properties: filterRules: - description: "A list of containers for the key-value pair that defines the criteria for the filter rule." + description: "A list of containers for the key-value pair that defines the criteria for\nthe filter rule." items: - description: "Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name." + description: "Specifies the Amazon S3 object key name to filter on and whether to filter\non the suffix or prefix of the key name." properties: name: type: "string" @@ -504,7 +504,7 @@ spec: type: "object" type: "object" id: - description: "An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID." + description: "An optional unique identifier for configurations in a notification configuration.\nIf you don't provide one, Amazon S3 will assign an ID." type: "string" lambdaFunctionARN: type: "string" @@ -512,22 +512,22 @@ spec: type: "array" queueConfigurations: items: - description: "Specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events." 
+ description: "Specifies the configuration for publishing messages to an Amazon Simple Queue\nService (Amazon SQS) queue when Amazon S3 detects specified events." properties: events: items: type: "string" type: "array" filter: - description: "Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide." + description: "Specifies object key name filtering rules. For information about key name\nfiltering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)\nin the Amazon S3 User Guide." properties: key: description: "A container for object key name prefix and suffix filtering rules." properties: filterRules: - description: "A list of containers for the key-value pair that defines the criteria for the filter rule." + description: "A list of containers for the key-value pair that defines the criteria for\nthe filter rule." items: - description: "Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name." + description: "Specifies the Amazon S3 object key name to filter on and whether to filter\non the suffix or prefix of the key name." properties: name: type: "string" @@ -538,7 +538,7 @@ spec: type: "object" type: "object" id: - description: "An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID." + description: "An optional unique identifier for configurations in a notification configuration.\nIf you don't provide one, Amazon S3 will assign an ID." type: "string" queueARN: type: "string" @@ -546,22 +546,22 @@ spec: type: "array" topicConfigurations: items: - description: "A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events." + description: "A container for specifying the configuration for publication of messages\nto an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3\ndetects specified events." properties: events: items: type: "string" type: "array" filter: - description: "Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide." + description: "Specifies object key name filtering rules. For information about key name\nfiltering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)\nin the Amazon S3 User Guide." properties: key: description: "A container for object key name prefix and suffix filtering rules." properties: filterRules: - description: "A list of containers for the key-value pair that defines the criteria for the filter rule." + description: "A list of containers for the key-value pair that defines the criteria for\nthe filter rule." items: - description: "Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name." + description: "Specifies the Amazon S3 object key name to filter on and whether to filter\non the suffix or prefix of the key name." 
properties: name: type: "string" @@ -572,7 +572,7 @@ spec: type: "object" type: "object" id: - description: "An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID." + description: "An optional unique identifier for configurations in a notification configuration.\nIf you don't provide one, Amazon S3 will assign an ID." type: "string" topicARN: type: "string" @@ -585,14 +585,14 @@ spec: objectOwnership: type: "string" ownershipControls: - description: "The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket." + description: "The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter)\nthat you want to apply to this Amazon S3 bucket." properties: rules: items: description: "The container element for an ownership control rule." properties: objectOwnership: - description: "The container element for object ownership for a bucket's ownership controls. \n BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL. \n ObjectWriter - The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL. \n BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect permissions. The bucket owner automatically owns and has full control over every object in the bucket. The bucket only accepts PUT requests that don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format." + description: "The container element for object ownership for a bucket's ownership controls.\n\n\nBucketOwnerPreferred - Objects uploaded to the bucket change ownership to\nthe bucket owner if the objects are uploaded with the bucket-owner-full-control\ncanned ACL.\n\n\nObjectWriter - The uploading account will own the object if the object is\nuploaded with the bucket-owner-full-control canned ACL.\n\n\nBucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer\naffect permissions. The bucket owner automatically owns and has full control\nover every object in the bucket. The bucket only accepts PUT requests that\ndon't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control\ncanned ACL or an equivalent form of this ACL expressed in the XML format." type: "string" type: "object" type: "array" @@ -601,7 +601,7 @@ spec: description: "The bucket policy as a JSON document." type: "string" publicAccessBlock: - description: "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) in the Amazon S3 User Guide." + description: "The PublicAccessBlock configuration that you want to apply to this Amazon\nS3 bucket. You can enable the configuration options in any combination. 
For\nmore information about when Amazon S3 considers a bucket or object public,\nsee The Meaning of \"Public\" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)\nin the Amazon S3 User Guide." properties: blockPublicACLs: type: "boolean" @@ -613,7 +613,7 @@ spec: type: "boolean" type: "object" replication: - description: "A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB." + description: "A container for replication rules. You can add up to 1,000 rules. The maximum\nsize of a replication configuration is 2 MB." properties: role: type: "string" @@ -622,13 +622,13 @@ spec: description: "Specifies which Amazon S3 objects to replicate and where to store the replicas." properties: deleteMarkerReplication: - description: "Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). \n For more information about delete marker replication, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). \n If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)." + description: "Specifies whether Amazon S3 replicates delete markers. If you specify a Filter\nin your replication configuration, you must also include a DeleteMarkerReplication\nelement. If your Filter includes a Tag element, the DeleteMarkerReplication\nStatus must be set to Disabled, because Amazon S3 does not support replicating\ndelete markers for tag-based rules. For an example configuration, see Basic\nRule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).\n\n\nFor more information about delete marker replication, see Basic Rule Configuration\n(https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).\n\n\nIf you are using an earlier version of the replication configuration, Amazon\nS3 handles replication of delete markers differently. For more information,\nsee Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)." properties: status: type: "string" type: "object" destination: - description: "Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC)." + description: "Specifies information about where to publish analysis or configuration results\nfor an Amazon S3 bucket and S3 Replication Time Control (S3 RTC)." properties: accessControlTranslation: description: "A container for information about access control for replicas." 
@@ -641,16 +641,16 @@ spec: bucket: type: "string" encryptionConfiguration: - description: "Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects." + description: "Specifies encryption-related information for an Amazon S3 bucket that is\na destination for replicated objects." properties: replicaKMSKeyID: type: "string" type: "object" metrics: - description: "A container specifying replication metrics-related settings enabling replication metrics and events." + description: "A container specifying replication metrics-related settings enabling replication\nmetrics and events." properties: eventThreshold: - description: "A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold." + description: "A container specifying the time value for S3 Replication Time Control (S3\nRTC) and replication metrics EventThreshold." properties: minutes: format: "int64" @@ -660,12 +660,12 @@ spec: type: "string" type: "object" replicationTime: - description: "A container specifying S3 Replication Time Control (S3 RTC) related information, including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. Must be specified together with a Metrics block." + description: "A container specifying S3 Replication Time Control (S3 RTC) related information,\nincluding whether S3 RTC is enabled and the time when all objects and operations\non objects must be replicated. Must be specified together with a Metrics\nblock." properties: status: type: "string" time: - description: "A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold." + description: "A container specifying the time value for S3 Replication Time Control (S3\nRTC) and replication metrics EventThreshold." properties: minutes: format: "int64" @@ -676,16 +676,16 @@ spec: type: "string" type: "object" existingObjectReplication: - description: "Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) in the Amazon S3 User Guide." + description: "Optional configuration to replicate existing source bucket objects. For more\ninformation, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)\nin the Amazon S3 User Guide." properties: status: type: "string" type: "object" filter: - description: "A filter that identifies the subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or an And child element." + description: "A filter that identifies the subset of objects to which the replication rule\napplies. A Filter must specify exactly one Prefix, Tag, or an And child element." properties: and: - description: "A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. \n For example: \n * If you specify both a Prefix and a Tag filter, wrap these filters in an And tag. \n * If you specify a filter based on multiple tags, wrap the Tag elements in an And tag." + description: "A container for specifying rule filters. The filters determine the subset\nof objects to which the rule applies. 
This element is required only if you\nspecify more than one filter.\n\n\nFor example:\n\n\n * If you specify both a Prefix and a Tag filter, wrap these filters in\n an And tag.\n\n\n * If you specify a filter based on multiple tags, wrap the Tag elements\n in an And tag." properties: prefix: type: "string" @@ -719,16 +719,16 @@ spec: format: "int64" type: "integer" sourceSelectionCriteria: - description: "A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer managed key stored in Amazon Web Services Key Management Service (SSE-KMS)." + description: "A container that describes additional filters for identifying the source\nobjects that you want to replicate. You can choose to enable or disable the\nreplication of these objects. Currently, Amazon S3 supports only the filter\nthat you can specify for objects created with server-side encryption using\na customer managed key stored in Amazon Web Services Key Management Service\n(SSE-KMS)." properties: replicaModifications: - description: "A filter that you can specify for selection for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. \n If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed." + description: "A filter that you can specify for selection for modifications on replicas.\nAmazon S3 doesn't replicate replica modifications by default. In the latest\nversion of replication configuration (when Filter is specified), you can\nspecify this element and set the status to Enabled to replicate modifications\non replicas.\n\n\nIf you don't specify the Filter element, Amazon S3 assumes that the replication\nconfiguration is the earlier version, V1. In the earlier version, this element\nis not allowed." properties: status: type: "string" type: "object" sseKMSEncryptedObjects: - description: "A container for filter information for the selection of S3 objects encrypted with Amazon Web Services KMS." + description: "A container for filter information for the selection of S3 objects encrypted\nwith Amazon Web Services KMS." properties: status: type: "string" @@ -781,7 +781,7 @@ spec: type: "string" type: "object" redirectAllRequestsTo: - description: "Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket." + description: "Specifies the redirect behavior of all requests to a website endpoint of\nan Amazon S3 bucket." properties: hostName: type: "string" @@ -790,10 +790,10 @@ spec: type: "object" routingRules: items: - description: "Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) in the Amazon S3 User Guide." + description: "Specifies the redirect behavior and when a redirect is applied. 
For more\ninformation about routing rules, see Configuring advanced conditional redirects\n(https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)\nin the Amazon S3 User Guide." properties: condition: - description: "A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error." + description: "A container for describing a condition that must be met for the specified\nredirect to apply. For example, 1. If request is for pages in the /docs folder,\nredirect to the /documents folder. 2. If request results in HTTP error 4xx,\nredirect request to another host where you might process the error." properties: httpErrorCodeReturnedEquals: type: "string" @@ -801,7 +801,7 @@ spec: type: "string" type: "object" redirect: - description: "Specifies how requests are redirected. In the event of an error, you can specify a different error code to return." + description: "Specifies how requests are redirected. In the event of an error, you can\nspecify a different error code to return." properties: hostName: type: "string" @@ -824,13 +824,13 @@ spec: description: "BucketStatus defines the observed state of Bucket" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -840,9 +840,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/apps.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/apps.yaml index d832346f6..4e62338c4 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/apps.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/apps.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "apps.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -23,10 +23,10 @@ spec: description: "App is the Schema for the Apps API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -43,7 +43,7 @@ spec: description: "The domain ID." type: "string" resourceSpec: - description: "The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. \n The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. 
If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error." + description: "The instance type and the Amazon Resource Name (ARN) of the SageMaker image\ncreated on the instance.\n\n\nThe value of InstanceType passed as part of the ResourceSpec in the CreateApp\ncall overrides the value passed as part of the ResourceSpec configured for\nthe user profile or the domain. If InstanceType is not specified in any of\nthose three ResourceSpec values for a KernelGateway app, the CreateApp call\nfails with a request validation error." properties: instanceType: type: "string" @@ -55,9 +55,9 @@ spec: type: "string" type: "object" tags: - description: "Each tag consists of a key and an optional value. Tag keys must be unique per resource." + description: "Each tag consists of a key and an optional value. Tag keys must be unique\nper resource." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -77,13 +77,13 @@ spec: description: "AppStatus defines the observed state of App" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -93,9 +93,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/dataqualityjobdefinitions.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/dataqualityjobdefinitions.yaml index ed187d035..1eb9b12b1 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/dataqualityjobdefinitions.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/dataqualityjobdefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "dataqualityjobdefinitions.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "DataQualityJobDefinition is the Schema for the DataQualityJobDefinitions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -70,7 +70,7 @@ spec: type: "object" type: "object" dataQualityJobInput: - description: "A list of inputs for the monitoring job. Currently endpoints are supported as monitoring inputs." + description: "A list of inputs for the monitoring job. Currently endpoints are supported\nas monitoring inputs." properties: endpointInput: description: "Input object for the endpoint" @@ -107,7 +107,7 @@ spec: description: "The output object for a monitoring job." properties: s3Output: - description: "Information about where and how you want to store the results of a monitoring job." + description: "Information about where and how you want to store the results of a monitoring\njob." properties: localPath: type: "string" @@ -148,7 +148,7 @@ spec: enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -161,7 +161,7 @@ spec: type: "object" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf." + description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume\nto perform tasks on your behalf." type: "string" stoppingCondition: description: "A time limit for how long the monitoring job is allowed to run before stopping." @@ -171,9 +171,9 @@ spec: type: "integer" type: "object" tags: - description: "(Optional) An array of key-value pairs. 
For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -193,13 +193,13 @@ spec: description: "DataQualityJobDefinitionStatus defines the observed state of DataQualityJobDefinition" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -209,9 +209,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/domains.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/domains.yaml index 719d00ddc..d45de5384 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/domains.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/domains.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "domains.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -26,10 +26,10 @@ spec: description: "Domain is the Schema for the Domains API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -37,16 +37,16 @@ spec: description: "DomainSpec defines the desired state of Domain." properties: appNetworkAccessType: - description: "Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. \n * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access \n * VpcOnly - All Studio traffic is through the specified VPC and subnets" + description: "Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly.\n\n\n * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon\n SageMaker, which allows direct internet access\n\n\n * VpcOnly - All Studio traffic is through the specified VPC and subnets" type: "string" appSecurityGroupManagement: - description: "The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided." + description: "The entity that creates and manages the required security groups for inter-app\ncommunication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType\nis VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn\nis provided." type: "string" authMode: description: "The mode of authentication that members use to access the domain." type: "string" defaultUserSettings: - description: "The default settings to use to create a user profile when UserSettings isn't specified in the call to the CreateUserProfile API. \n SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain." + description: "The default settings to use to create a user profile when UserSettings isn't\nspecified in the call to the CreateUserProfile API.\n\n\nSecurityGroups is aggregated when specified in both calls. For all other\nsettings in UserSettings, the values specified in CreateUserProfile take\nprecedence over those specified in CreateDomain." properties: executionRole: type: "string" @@ -54,7 +54,7 @@ spec: description: "The JupyterServer app settings." properties: defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -75,7 +75,7 @@ spec: properties: customImages: items: - description: "A custom SageMaker image. For more information, see Bring your own SageMaker image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html)." + description: "A custom SageMaker image. 
For more information, see Bring your own SageMaker\nimage (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html)." properties: appImageConfigName: type: "string" @@ -87,7 +87,7 @@ spec: type: "object" type: "array" defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -104,7 +104,7 @@ spec: type: "array" type: "object" rStudioServerProAppSettings: - description: "A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes." + description: "A collection of settings that configure user interaction with the RStudioServerPro\napp. RStudioServerProAppSettings cannot be updated. The RStudioServerPro\napp must be deleted and a new one created to make any changes." properties: accessStatus: type: "string" @@ -116,7 +116,7 @@ spec: type: "string" type: "array" sharingSettings: - description: "Specifies options for sharing SageMaker Studio notebooks. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed." + description: "Specifies options for sharing SageMaker Studio notebooks. These settings\nare specified as part of DefaultUserSettings when the CreateDomain API is\ncalled, and as part of UserSettings when the CreateUserProfile API is called.\nWhen SharingSettings is not specified, notebook sharing isn't allowed." properties: notebookOutputOption: type: "string" @@ -129,7 +129,7 @@ spec: description: "The TensorBoard app settings." properties: defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -149,10 +149,10 @@ spec: description: "A collection of Domain settings." properties: rStudioServerProDomainSettings: - description: "A collection of settings that configure the RStudioServerPro Domain-level app." + description: "A collection of settings that configure the RStudioServerPro Domain-level\napp." properties: defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -179,7 +179,7 @@ spec: description: "Use KmsKeyId." type: "string" kmsKeyID: - description: "SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key." + description: "SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached\nto the domain with an Amazon Web Services managed key by default. For more\ncontrol, specify a customer managed key." 
type: "string" subnetIDs: description: "The VPC subnets that Studio uses for communication." @@ -187,9 +187,9 @@ spec: type: "string" type: "array" tags: - description: "Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. \n Tags that you specify for the Domain are also added to all Apps that the Domain launches." + description: "Tags to associated with the Domain. Each tag consists of a key and an optional\nvalue. Tag keys must be unique per resource. Tags are searchable using the\nSearch API.\n\n\nTags that you specify for the Domain are also added to all Apps that the\nDomain launches." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -211,13 +211,13 @@ spec: description: "DomainStatus defines the observed state of Domain" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -227,9 +227,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpointconfigs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpointconfigs.yaml index 011e723fc..b103866d2 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpointconfigs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpointconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "endpointconfigs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "EndpointConfig is the Schema for the EndpointConfigs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,10 +30,10 @@ spec: description: "EndpointConfigSpec defines the desired state of EndpointConfig." properties: asyncInferenceConfig: - description: "Specifies configuration for how an endpoint performs asynchronous inference. This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html)." + description: "Specifies configuration for how an endpoint performs asynchronous inference.\nThis is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync\n(https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html)." properties: clientConfig: - description: "Configures the behavior of the client used by SageMaker to interact with the model container during asynchronous inference." + description: "Configures the behavior of the client used by SageMaker to interact with\nthe model container during asynchronous inference." properties: maxConcurrentInvocationsPerInstance: format: "int64" @@ -45,7 +45,7 @@ spec: kmsKeyID: type: "string" notificationConfig: - description: "Specifies the configuration for notifications of inference results for asynchronous inference." + description: "Specifies the configuration for notifications of inference results for asynchronous\ninference." properties: errorTopic: type: "string" @@ -60,7 +60,7 @@ spec: description: "Configuration to control how SageMaker captures inference data." properties: captureContentTypeHeader: - description: "Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data." + description: "Configuration specifying how to treat different headers. If no headers are\nspecified SageMaker will by default base64 encode when capturing the data." properties: csvContentTypes: items: @@ -90,15 +90,15 @@ spec: type: "string" type: "object" endpointConfigName: - description: "The name of the endpoint configuration. You specify this name in a CreateEndpoint request." + description: "The name of the endpoint configuration. You specify this name in a CreateEndpoint\nrequest." 
type: "string" kmsKeyID: - description: "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. \n The KmsKeyId can be any of the following formats: \n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab \n * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab \n * Alias name: alias/ExampleAlias \n * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias \n The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) \n Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails. \n For a list of instance types that support local instance storage, see Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). \n For more information about local instance storage encryption, see SSD Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html)." + description: "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service\nkey that SageMaker uses to encrypt data on the storage volume attached to\nthe ML compute instance that hosts the endpoint.\n\n\nThe KmsKeyId can be any of the following formats:\n\n\n * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n\n\n * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n\n\n * Alias name: alias/ExampleAlias\n\n\n * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\n\n\nThe KMS key policy must grant permission to the IAM role that you specify\nin your CreateEndpoint, UpdateEndpoint requests. For more information, refer\nto the Amazon Web Services Key Management Service section Using Key Policies\nin Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)\n\n\nCertain Nitro-based instances include local storage, dependent on the instance\ntype. Local storage volumes are encrypted using a hardware module on the\ninstance. You can't request a KmsKeyId when using an instance type with local\nstorage. If any of the models that you specify in the ProductionVariants\nparameter use nitro-based instances with local storage, do not specify a\nvalue for the KmsKeyId parameter. 
If you specify a value for KmsKeyId when\nusing any nitro-based instances with local storage, the call to CreateEndpointConfig\nfails.\n\n\nFor a list of instance types that support local instance storage, see Instance\nStore Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes).\n\n\nFor more information about local instance storage encryption, see SSD Instance\nStore Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html)." type: "string" productionVariants: - description: "An array of ProductionVariant objects, one for each model that you want to host at this endpoint." + description: "An array of ProductionVariant objects, one for each model that you want to\nhost at this endpoint." items: - description: "Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants (https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html)." + description: "Identifies a model that you want to host and the resources chosen to deploy\nfor hosting it. If you are deploying multiple models, tell SageMaker how\nto distribute traffic among the models by specifying variant weights. For\nmore information on production variants, check Production variants (https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html)." properties: acceleratorType: type: "string" @@ -106,7 +106,7 @@ spec: format: "int64" type: "integer" coreDumpConfig: - description: "Specifies configuration for a core dump from the model container when the process crashes." + description: "Specifies configuration for a core dump from the model container when the\nprocess crashes." properties: destinationS3URI: type: "string" @@ -145,9 +145,9 @@ spec: type: "object" type: "array" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." 
+ description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -163,13 +163,13 @@ spec: description: "EndpointConfigStatus defines the observed state of EndpointConfig" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -179,9 +179,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpoints.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpoints.yaml index b944f7d2f..f1fd747b1 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpoints.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/endpoints.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "endpoints.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,21 +27,21 @@ spec: description: "Endpoint is the Schema for the Endpoints API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "EndpointSpec defines the desired state of Endpoint. \n A hosted endpoint for real-time inference." + description: "EndpointSpec defines the desired state of Endpoint.\n\n\nA hosted endpoint for real-time inference." 
properties: deploymentConfig: - description: "The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations." + description: "The deployment configuration for an endpoint, which contains the desired\ndeployment strategy and rollback configurations." properties: autoRollbackConfiguration: - description: "Automatic rollback configuration for handling endpoint deployment failures and recovery." + description: "Automatic rollback configuration for handling endpoint deployment failures\nand recovery." properties: alarms: items: @@ -53,7 +53,7 @@ spec: type: "array" type: "object" blueGreenUpdatePolicy: - description: "Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default." + description: "Update policy for a blue/green deployment. If this update policy is specified,\nSageMaker creates a new fleet during the deployment while maintaining the\nold fleet. SageMaker flips traffic to the new fleet according to the specified\ntraffic routing configuration. Only one update policy should be used in the\ndeployment configuration. If no update policy is specified, SageMaker uses\na blue/green deployment strategy with all at once traffic shifting by default." properties: maximumExecutionTimeoutInSeconds: format: "int64" @@ -62,7 +62,7 @@ spec: format: "int64" type: "integer" trafficRoutingConfiguration: - description: "Defines the traffic routing strategy during an endpoint deployment to shift traffic from the old fleet to the new fleet." + description: "Defines the traffic routing strategy during an endpoint deployment to shift\ntraffic from the old fleet to the new fleet." properties: canarySize: description: "Specifies the endpoint capacity to activate for production." @@ -94,12 +94,12 @@ spec: description: "The name of an endpoint configuration. For more information, see CreateEndpointConfig." type: "string" endpointName: - description: "The name of the endpoint.The name must be unique within an Amazon Web Services Region in your Amazon Web Services account. The name is case-insensitive in CreateEndpoint, but the case is preserved and must be matched in ." + description: "The name of the endpoint.The name must be unique within an Amazon Web Services\nRegion in your Amazon Web Services account. The name is case-insensitive\nin CreateEndpoint, but the case is preserved and must be matched in ." type: "string" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." 
items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -115,13 +115,13 @@ spec: description: "EndpointStatus defines the observed state of Endpoint" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." 
+ description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -131,9 +131,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -161,7 +161,7 @@ spec: format: "date-time" type: "string" endpointStatus: - description: "The status of the endpoint. \n * OutOfService: Endpoint is not available to take incoming requests. \n * Creating: CreateEndpoint is executing. \n * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. \n * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count. \n * RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly. \n * InService: Endpoint is available to process incoming requests. \n * Deleting: DeleteEndpoint is executing. \n * Failed: Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint." + description: "The status of the endpoint.\n\n\n * OutOfService: Endpoint is not available to take incoming requests.\n\n\n * Creating: CreateEndpoint is executing.\n\n\n * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.\n\n\n * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated\n or deleted or re-scaled until it has completed. 
This maintenance operation\n does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.\n\n\n * RollingBack: Endpoint fails to scale up or down or change its variant\n weight and is in the process of rolling back to its previous configuration.\n Once the rollback completes, endpoint returns to an InService status.\n This transitional status only applies to an endpoint that has autoscaling\n enabled and is undergoing variant weight or capacity changes as part of\n an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities\n operation is called explicitly.\n\n\n * InService: Endpoint is available to process incoming requests.\n\n\n * Deleting: DeleteEndpoint is executing.\n\n\n * Failed: Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason\n for information about the failure. DeleteEndpoint is the only operation\n that can be performed on a failed endpoint." type: "string" failureReason: description: "If the status of the endpoint is Failed, the reason why it failed." @@ -171,13 +171,13 @@ spec: format: "date-time" type: "string" pendingDeploymentSummary: - description: "Returns the summary of an in-progress deployment. This field is only returned when the endpoint is creating or updating with a new endpoint configuration." + description: "Returns the summary of an in-progress deployment. This field is only returned\nwhen the endpoint is creating or updating with a new endpoint configuration." properties: endpointConfigName: type: "string" productionVariants: items: - description: "The production variant summary for a deployment when an endpoint is creating or updating with the CreateEndpoint or UpdateEndpoint operations. Describes the VariantStatus , weight and capacity for a production variant associated with an endpoint." + description: "The production variant summary for a deployment when an endpoint is creating\nor updating with the CreateEndpoint or UpdateEndpoint operations. Describes\nthe VariantStatus , weight and capacity for a production variant associated\nwith an endpoint." properties: acceleratorType: type: "string" @@ -198,7 +198,7 @@ spec: type: "number" deployedImages: items: - description: "Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant. \n If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) in the Amazon ECR User Guide." + description: "Gets the Amazon EC2 Container Registry path of the docker image of the model\nthat is hosted in this ProductionVariant.\n\n\nIf you used the registry/repository[:tag] form to specify the image path\nof the primary container when you created the model hosted in this ProductionVariant,\nthe path resolves to a path of the form registry/repository[@digest]. A digest\nis a hash value that identifies a specific version of an image. For information\nabout Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html)\nin the Amazon ECR User Guide." 
properties: resolutionTime: format: "date-time" @@ -248,9 +248,9 @@ spec: type: "string" type: "object" productionVariants: - description: "An array of ProductionVariantSummary objects, one for each model hosted behind this endpoint." + description: "An array of ProductionVariantSummary objects, one for each model hosted behind\nthis endpoint." items: - description: "Describes weight and capacities for a production variant associated with an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities API and the endpoint status is Updating, you get different desired and current values." + description: "Describes weight and capacities for a production variant associated with\nan endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities\nAPI and the endpoint status is Updating, you get different desired and current\nvalues." properties: currentInstanceCount: format: "int64" @@ -269,7 +269,7 @@ spec: type: "number" deployedImages: items: - description: "Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant. \n If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) in the Amazon ECR User Guide." + description: "Gets the Amazon EC2 Container Registry path of the docker image of the model\nthat is hosted in this ProductionVariant.\n\n\nIf you used the registry/repository[:tag] form to specify the image path\nof the primary container when you created the model hosted in this ProductionVariant,\nthe path resolves to a path of the form registry/repository[@digest]. A digest\nis a hash value that identifies a specific version of an image. For information\nabout Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html)\nin the Amazon ECR User Guide." properties: resolutionTime: format: "date-time" diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/featuregroups.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/featuregroups.yaml index 217cf20f0..d864d9cca 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/featuregroups.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/featuregroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "featuregroups.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,26 +27,26 @@ spec: description: "FeatureGroup is the Schema for the FeatureGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "FeatureGroupSpec defines the desired state of FeatureGroup. \n Amazon SageMaker Feature Store stores features in a collection called Feature Group. A Feature Group can be visualized as a table which has rows, with a unique identifier for each row where each column in the table is a feature. In principle, a Feature Group is composed of features and values per features." + description: "FeatureGroupSpec defines the desired state of FeatureGroup.\n\n\nAmazon SageMaker Feature Store stores features in a collection called Feature\nGroup. A Feature Group can be visualized as a table which has rows, with\na unique identifier for each row where each column in the table is a feature.\nIn principle, a Feature Group is composed of features and values per features." properties: description: description: "A free-form description of a FeatureGroup." type: "string" eventTimeFeatureName: - description: "The name of the feature that stores the EventTime of a Record in a FeatureGroup. \n An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a Record in a FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime. \n An EventTime can be a String or Fractional. \n * Fractional: EventTime feature values must be a Unix timestamp in seconds. \n * String: EventTime feature values must be an ISO-8601 string in the format. The following formats are supported yyyy-MM-dd'T'HH:mm:ssZ and yyyy-MM-dd'T'HH:mm:ss.SSSZ where yyyy, MM, and dd represent the year, month, and day respectively and HH, mm, ss, and if applicable, SSS represent the hour, month, second and milliseconds respsectively. 'T' and Z are constants." + description: "The name of the feature that stores the EventTime of a Record in a FeatureGroup.\n\n\nAn EventTime is a point in time when a new event occurs that corresponds\nto the creation or update of a Record in a FeatureGroup. 
All Records in the\nFeatureGroup must have a corresponding EventTime.\n\n\nAn EventTime can be a String or Fractional.\n\n\n * Fractional: EventTime feature values must be a Unix timestamp in seconds.\n\n\n * String: EventTime feature values must be an ISO-8601 string in the format.\n The following formats are supported yyyy-MM-dd'T'HH:mm:ssZ and yyyy-MM-dd'T'HH:mm:ss.SSSZ\n where yyyy, MM, and dd represent the year, month, and day respectively\n and HH, mm, ss, and if applicable, SSS represent the hour, month, second\n and milliseconds respsectively. 'T' and Z are constants." type: "string" featureDefinitions: - description: "A list of Feature names and types. Name and Type is compulsory per Feature. \n Valid feature FeatureTypes are Integral, Fractional and String. \n FeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time \n You can create up to 2,500 FeatureDefinitions per FeatureGroup." + description: "A list of Feature names and types. Name and Type is compulsory per Feature.\n\n\nValid feature FeatureTypes are Integral, Fractional and String.\n\n\nFeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time\n\n\nYou can create up to 2,500 FeatureDefinitions per FeatureGroup." items: - description: "A list of features. You must include FeatureName and FeatureType. Valid feature FeatureTypes are Integral, Fractional and String." + description: "A list of features. You must include FeatureName and FeatureType. Valid feature\nFeatureTypes are Integral, Fractional and String." properties: featureName: type: "string" @@ -55,10 +55,10 @@ spec: type: "object" type: "array" featureGroupName: - description: "The name of the FeatureGroup. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. The name: \n * Must start and end with an alphanumeric character. \n * Can only contain alphanumeric character and hyphens. Spaces are not allowed." + description: "The name of the FeatureGroup. The name must be unique within an Amazon Web\nServices Region in an Amazon Web Services account. The name:\n\n\n * Must start and end with an alphanumeric character.\n\n\n * Can only contain alphanumeric character and hyphens. Spaces are not\n allowed." type: "string" offlineStoreConfig: - description: "Use this to configure an OfflineFeatureStore. This parameter allows you to specify: \n * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. \n * A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog. \n * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. If KMS encryption key is not specified, by default we encrypt all data at rest using Amazon Web Services KMS key. By defining your bucket-level key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html) for SSE, you can reduce Amazon Web Services KMS requests costs by up to 99 percent. \n * Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). \n To learn more about this parameter, see OfflineStoreConfig." + description: "Use this to configure an OfflineFeatureStore. 
This parameter allows you to\nspecify:\n\n\n * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore.\n\n\n * A configuration for an Amazon Web Services Glue or Amazon Web Services\n Hive data catalog.\n\n\n * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore.\n If KMS encryption key is not specified, by default we encrypt all data\n at rest using Amazon Web Services KMS key. By defining your bucket-level\n key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html)\n for SSE, you can reduce Amazon Web Services KMS requests costs by up to\n 99 percent.\n\n\n * Format for the offline store table. Supported formats are Glue (Default)\n and Apache Iceberg (https://iceberg.apache.org/).\n\n\nTo learn more about this parameter, see OfflineStoreConfig." properties: dataCatalogConfig: description: "The meta data of the Glue table which serves as data catalog for the OfflineStore." @@ -73,7 +73,7 @@ spec: disableGlueTableCreation: type: "boolean" s3StorageConfig: - description: "The Amazon Simple Storage (Amazon S3) location and and security configuration for OfflineStore." + description: "The Amazon Simple Storage (Amazon S3) location and and security configuration\nfor OfflineStore." properties: kmsKeyID: type: "string" @@ -84,7 +84,7 @@ spec: type: "object" type: "object" onlineStoreConfig: - description: "You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False. \n You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore." + description: "You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore\nflag in OnlineStoreConfig; the default value is False.\n\n\nYou can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest\nencryption of the OnlineStore." properties: enableOnlineStore: type: "boolean" @@ -96,15 +96,15 @@ spec: type: "object" type: "object" recordIdentifierFeatureName: - description: "The name of the Feature whose value uniquely identifies a Record defined in the FeatureStore. Only the latest record per identifier value will be stored in the OnlineStore. RecordIdentifierFeatureName must be one of feature definitions' names. \n You use the RecordIdentifierFeatureName to access data in a FeatureStore. \n This name: \n * Must start and end with an alphanumeric character. \n * Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed." + description: "The name of the Feature whose value uniquely identifies a Record defined\nin the FeatureStore. Only the latest record per identifier value will be\nstored in the OnlineStore. RecordIdentifierFeatureName must be one of feature\ndefinitions' names.\n\n\nYou use the RecordIdentifierFeatureName to access data in a FeatureStore.\n\n\nThis name:\n\n\n * Must start and end with an alphanumeric character.\n\n\n * Can only contains alphanumeric characters, hyphens, underscores. Spaces\n are not allowed." type: "string" roleARN: - description: "The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided." + description: "The Amazon Resource Name (ARN) of the IAM execution role used to persist\ndata into the OfflineStore if an OfflineStoreConfig is provided." type: "string" tags: description: "Tags used to identify Features in each FeatureGroup." 
items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -122,13 +122,13 @@ spec: description: "FeatureGroupStatus defines the observed state of FeatureGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -138,9 +138,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -164,7 +164,7 @@ spec: type: "object" type: "array" failureReason: - description: "The reason that the FeatureGroup failed to be replicated in the OfflineStore. This is failure can occur because: \n * The FeatureGroup could not be created in the OfflineStore. \n * The FeatureGroup could not be deleted from the OfflineStore." + description: "The reason that the FeatureGroup failed to be replicated in the OfflineStore.\nThis is failure can occur because:\n\n\n * The FeatureGroup could not be created in the OfflineStore.\n\n\n * The FeatureGroup could not be deleted from the OfflineStore." type: "string" featureGroupStatus: description: "The status of the feature group." 
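For reference, the FeatureGroup schema updated above (group sagemaker.services.k8s.aws, version v1alpha1) could be exercised with a manifest along the following lines. This is a minimal illustrative sketch rather than part of the upstream patch: the metadata name, feature names, tag values, and role ARN are hypothetical placeholders, and only fields visible in the spec above (featureGroupName, recordIdentifierFeatureName, eventTimeFeatureName, featureDefinitions, onlineStoreConfig, roleARN, tags) are used.

apiVersion: "sagemaker.services.k8s.aws/v1alpha1"
kind: "FeatureGroup"
metadata:
  name: "example-feature-group"               # hypothetical resource name
spec:
  featureGroupName: "example-feature-group"   # alphanumeric characters and hyphens only
  recordIdentifierFeatureName: "record_id"    # must match one of the feature definitions below
  eventTimeFeatureName: "event_time"          # every Record needs a corresponding EventTime
  featureDefinitions:
    - featureName: "record_id"
      featureType: "String"
    - featureName: "event_time"
      featureType: "Fractional"               # Fractional EventTime values are Unix timestamps in seconds
  onlineStoreConfig:
    enableOnlineStore: true                   # defaults to false when omitted
  roleARN: "arn:aws:iam::111122223333:role/example-feature-store-role"  # hypothetical execution role
  tags:
    - key: "environment"
      value: "dev"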
diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/hyperparametertuningjobs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/hyperparametertuningjobs.yaml index ea4b6e677..45a792a5d 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/hyperparametertuningjobs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/hyperparametertuningjobs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "hyperparametertuningjobs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,10 +27,10 @@ spec: description: "HyperParameterTuningJob is the Schema for the HyperParameterTuningJobs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -38,10 +38,10 @@ spec: description: "HyperParameterTuningJobSpec defines the desired state of HyperParameterTuningJob." properties: hyperParameterTuningJobConfig: - description: "The HyperParameterTuningJobConfig object that describes the tuning job, including the search strategy, the objective metric used to evaluate training jobs, ranges of parameters to search, and resource limits for the tuning job. For more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html)." + description: "The HyperParameterTuningJobConfig object that describes the tuning job, including\nthe search strategy, the objective metric used to evaluate training jobs,\nranges of parameters to search, and resource limits for the tuning job. For\nmore information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html)." properties: hyperParameterTuningJobObjective: - description: "Defines the objective metric for a hyperparameter tuning job. 
Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter." + description: "Defines the objective metric for a hyperparameter tuning job. Hyperparameter\ntuning uses the value of this metric to evaluate the training jobs it launches,\nand returns the training job that results in either the highest or lowest\nvalue for this metric, depending on the value you specify for the Type parameter." properties: metricName: type: "string" @@ -49,7 +49,7 @@ spec: type: "string" type: "object" parameterRanges: - description: "Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. \n The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified." + description: "Specifies ranges of integer, continuous, and categorical hyperparameters\nthat a hyperparameter tuning job searches. The hyperparameter tuning job\nlaunches training jobs with hyperparameter values within these ranges to\nfind the combination of values that result in the training job with the best\nperformance as measured by the objective metric of the hyperparameter tuning\njob.\n\n\nThe maximum number of items specified for Array Members refers to the maximum\nnumber of hyperparameters for each range and also the maximum for the hyperparameter\ntuning job itself. That is, the sum of the number of hyperparameters for\nall the ranges can't exceed the maximum number specified." properties: categoricalParameterRanges: items: @@ -79,7 +79,7 @@ spec: type: "array" integerParameterRanges: items: - description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches." + description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter\ntuning job searches." properties: maxValue: type: "string" @@ -93,7 +93,7 @@ spec: type: "array" type: "object" resourceLimits: - description: "Specifies the maximum number of training jobs and parallel training jobs that a hyperparameter tuning job can launch." + description: "Specifies the maximum number of training jobs and parallel training jobs\nthat a hyperparameter tuning job can launch." properties: maxNumberOfTrainingJobs: format: "int64" @@ -103,7 +103,7 @@ spec: type: "integer" type: "object" strategy: - description: "The strategy hyperparameter tuning uses to find the best combination of hyperparameters for your model." + description: "The strategy hyperparameter tuning uses to find the best combination of hyperparameters\nfor your model." type: "string" trainingJobEarlyStoppingType: type: "string" @@ -115,12 +115,12 @@ spec: type: "object" type: "object" hyperParameterTuningJobName: - description: "The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. 
The name must be unique within the same Amazon Web Services account and Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive." + description: "The name of the tuning job. This name is the prefix for the names of all\ntraining jobs that this tuning job launches. The name must be unique within\nthe same Amazon Web Services account and Amazon Web Services Region. The\nname must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and\n: + = @ _ % - (hyphen). The name is not case sensitive." type: "string" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). \n Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\n\n\nTags that you specify for the tuning job are also added to all training jobs\nthat the tuning job launches." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." 
properties: key: type: "string" @@ -129,16 +129,16 @@ spec: type: "object" type: "array" trainingJobDefinition: - description: "The HyperParameterTrainingJobDefinition object that describes the training jobs that this tuning job launches, including static hyperparameters, input data configuration, output data configuration, resource configuration, and stopping condition." + description: "The HyperParameterTrainingJobDefinition object that describes the training\njobs that this tuning job launches, including static hyperparameters, input\ndata configuration, output data configuration, resource configuration, and\nstopping condition." properties: algorithmSpecification: - description: "Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor." + description: "Specifies which training algorithm to use for training jobs that a hyperparameter\ntuning job launches and the metrics to monitor." properties: algorithmName: type: "string" metricDefinitions: items: - description: "Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job." + description: "Specifies a metric that the training algorithm writes to stderr or stdout.\nSageMakerhyperparameter tuning captures all defined metrics. You specify\none metric that a hyperparameter tuning job uses as its objective metric\nto choose the best training job." properties: name: type: "string" @@ -149,11 +149,11 @@ spec: trainingImage: type: "string" trainingInputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. \n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. 
For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" type: "object" checkpointConfig: - description: "Contains information about the output location for managed spot training checkpoint data." + description: "Contains information about the output location for managed spot training\ncheckpoint data." properties: localPath: type: "string" @@ -169,7 +169,7 @@ spec: enableNetworkIsolation: type: "boolean" hyperParameterRanges: - description: "Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. \n The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified." + description: "Specifies ranges of integer, continuous, and categorical hyperparameters\nthat a hyperparameter tuning job searches. The hyperparameter tuning job\nlaunches training jobs with hyperparameter values within these ranges to\nfind the combination of values that result in the training job with the best\nperformance as measured by the objective metric of the hyperparameter tuning\njob.\n\n\nThe maximum number of items specified for Array Members refers to the maximum\nnumber of hyperparameters for each range and also the maximum for the hyperparameter\ntuning job itself. That is, the sum of the number of hyperparameters for\nall the ranges can't exceed the maximum number specified." 
properties: categoricalParameterRanges: items: @@ -199,7 +199,7 @@ spec: type: "array" integerParameterRanges: items: - description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches." + description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter\ntuning job searches." properties: maxValue: type: "string" @@ -257,12 +257,12 @@ spec: type: "object" type: "object" inputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. \n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. 
SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" recordWrapperType: type: "string" shuffleConfig: - description: "A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. \n For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch." + description: "A configuration for a shuffle option for input data in a channel. If you\nuse S3Prefix for S3DataType, the results of the S3 key prefix matches are\nshuffled. If you use ManifestFile, the order of the S3 object references\nin the ManifestFile is shuffled. If you use AugmentedManifestFile, the order\nof the JSON lines in the AugmentedManifestFile is shuffled. The shuffling\norder is determined using the Seed value.\n\n\nFor Pipe input mode, when ShuffleConfig is specified shuffling is done at\nthe start of every epoch. With large datasets, this ensures that the order\nof the training data is different for each epoch, and it helps reduce bias\nand possible overfitting. In a multi-node training job when ShuffleConfig\nis combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled\nacross nodes so that the content sent to a particular node on the first epoch\nmight be sent to a different node on the second epoch." properties: seed: format: "int64" @@ -279,14 +279,14 @@ spec: type: "string" type: "object" resourceConfig: - description: "Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training." + description: "Describes the resources, including machine learning (ML) compute instances\nand ML storage volumes, to use for model training." properties: instanceCount: format: "int64" type: "integer" instanceGroups: items: - description: "Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups ." + description: "Defines an instance group for heterogeneous cluster training. 
When requesting\na training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html)\nAPI, you can configure multiple instance groups ." properties: instanceCount: format: "int64" @@ -309,7 +309,7 @@ spec: type: "integer" type: "object" retryStrategy: - description: "The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job." + description: "The retry strategy to use when a training job fails due to an InternalServerError.\nRetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob\nrequests. You can add the StoppingCondition parameter to the request to limit\nthe training time for the complete job." properties: maximumRetryAttempts: format: "int64" @@ -322,7 +322,7 @@ spec: type: "string" type: "object" stoppingCondition: - description: "Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. \n To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. \n The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. \n The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete." + description: "Specifies a limit to how long a model training job or model compilation job\ncan run. It also specifies how long a managed spot training job has to complete.\nWhen the job reaches the time limit, SageMaker ends the training or compilation\njob. Use this API to cap model training costs.\n\n\nTo stop a training job, SageMaker sends the algorithm the SIGTERM signal,\nwhich delays job termination for 120 seconds. Algorithms can use this 120-second\nwindow to save the model artifacts, so the results of training are not lost.\n\n\nThe training algorithms provided by SageMaker automatically save the intermediate\nresults of a model training job when possible. This attempt to save artifacts\nis only a best effort case as model might not be in a state from which it\ncan be saved. For example, if training has just started, the model might\nnot be ready to save. When saved, this intermediate data is a valid model\nartifact. You can use it to create a model with CreateModel.\n\n\nThe Neural Topic Model (NTM) currently does not support saving intermediate\nmodel artifacts. When training NTMs, make sure that the maximum runtime is\nsufficient for the training job to complete." 
properties: maxRuntimeInSeconds: format: "int64" @@ -332,7 +332,7 @@ spec: type: "integer" type: "object" tuningObjective: - description: "Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter." + description: "Defines the objective metric for a hyperparameter tuning job. Hyperparameter\ntuning uses the value of this metric to evaluate the training jobs it launches,\nand returns the training job that results in either the highest or lowest\nvalue for this metric, depending on the value you specify for the Type parameter." properties: metricName: type: "string" @@ -340,7 +340,7 @@ spec: type: "string" type: "object" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -353,18 +353,18 @@ spec: type: "object" type: "object" trainingJobDefinitions: - description: "A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job." + description: "A list of the HyperParameterTrainingJobDefinition objects launched for this\ntuning job." items: description: "Defines the training jobs launched by a hyperparameter tuning job." properties: algorithmSpecification: - description: "Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor." + description: "Specifies which training algorithm to use for training jobs that a hyperparameter\ntuning job launches and the metrics to monitor." properties: algorithmName: type: "string" metricDefinitions: items: - description: "Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job." + description: "Specifies a metric that the training algorithm writes to stderr or stdout.\nSageMakerhyperparameter tuning captures all defined metrics. You specify\none metric that a hyperparameter tuning job uses as its objective metric\nto choose the best training job." properties: name: type: "string" @@ -375,11 +375,11 @@ spec: trainingImage: type: "string" trainingInputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). 
\n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. \n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" type: "object" checkpointConfig: - description: "Contains information about the output location for managed spot training checkpoint data." + description: "Contains information about the output location for managed spot training\ncheckpoint data." 
properties: localPath: type: "string" @@ -395,7 +395,7 @@ spec: enableNetworkIsolation: type: "boolean" hyperParameterRanges: - description: "Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. \n The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified." + description: "Specifies ranges of integer, continuous, and categorical hyperparameters\nthat a hyperparameter tuning job searches. The hyperparameter tuning job\nlaunches training jobs with hyperparameter values within these ranges to\nfind the combination of values that result in the training job with the best\nperformance as measured by the objective metric of the hyperparameter tuning\njob.\n\n\nThe maximum number of items specified for Array Members refers to the maximum\nnumber of hyperparameters for each range and also the maximum for the hyperparameter\ntuning job itself. That is, the sum of the number of hyperparameters for\nall the ranges can't exceed the maximum number specified." properties: categoricalParameterRanges: items: @@ -425,7 +425,7 @@ spec: type: "array" integerParameterRanges: items: - description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches." + description: "For a hyperparameter of the integer type, specifies the range that a hyperparameter\ntuning job searches." properties: maxValue: type: "string" @@ -483,12 +483,12 @@ spec: type: "object" type: "object" inputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. 
\n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" recordWrapperType: type: "string" shuffleConfig: - description: "A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. \n For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch." + description: "A configuration for a shuffle option for input data in a channel. If you\nuse S3Prefix for S3DataType, the results of the S3 key prefix matches are\nshuffled. If you use ManifestFile, the order of the S3 object references\nin the ManifestFile is shuffled. If you use AugmentedManifestFile, the order\nof the JSON lines in the AugmentedManifestFile is shuffled. The shuffling\norder is determined using the Seed value.\n\n\nFor Pipe input mode, when ShuffleConfig is specified shuffling is done at\nthe start of every epoch. 
With large datasets, this ensures that the order\nof the training data is different for each epoch, and it helps reduce bias\nand possible overfitting. In a multi-node training job when ShuffleConfig\nis combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled\nacross nodes so that the content sent to a particular node on the first epoch\nmight be sent to a different node on the second epoch." properties: seed: format: "int64" @@ -505,14 +505,14 @@ spec: type: "string" type: "object" resourceConfig: - description: "Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training." + description: "Describes the resources, including machine learning (ML) compute instances\nand ML storage volumes, to use for model training." properties: instanceCount: format: "int64" type: "integer" instanceGroups: items: - description: "Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups ." + description: "Defines an instance group for heterogeneous cluster training. When requesting\na training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html)\nAPI, you can configure multiple instance groups ." properties: instanceCount: format: "int64" @@ -535,7 +535,7 @@ spec: type: "integer" type: "object" retryStrategy: - description: "The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job." + description: "The retry strategy to use when a training job fails due to an InternalServerError.\nRetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob\nrequests. You can add the StoppingCondition parameter to the request to limit\nthe training time for the complete job." properties: maximumRetryAttempts: format: "int64" @@ -548,7 +548,7 @@ spec: type: "string" type: "object" stoppingCondition: - description: "Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. \n To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. \n The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. \n The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete." 
+ description: "Specifies a limit to how long a model training job or model compilation job\ncan run. It also specifies how long a managed spot training job has to complete.\nWhen the job reaches the time limit, SageMaker ends the training or compilation\njob. Use this API to cap model training costs.\n\n\nTo stop a training job, SageMaker sends the algorithm the SIGTERM signal,\nwhich delays job termination for 120 seconds. Algorithms can use this 120-second\nwindow to save the model artifacts, so the results of training are not lost.\n\n\nThe training algorithms provided by SageMaker automatically save the intermediate\nresults of a model training job when possible. This attempt to save artifacts\nis only a best effort case as model might not be in a state from which it\ncan be saved. For example, if training has just started, the model might\nnot be ready to save. When saved, this intermediate data is a valid model\nartifact. You can use it to create a model with CreateModel.\n\n\nThe Neural Topic Model (NTM) currently does not support saving intermediate\nmodel artifacts. When training NTMs, make sure that the maximum runtime is\nsufficient for the training job to complete." properties: maxRuntimeInSeconds: format: "int64" @@ -558,7 +558,7 @@ spec: type: "integer" type: "object" tuningObjective: - description: "Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter." + description: "Defines the objective metric for a hyperparameter tuning job. Hyperparameter\ntuning uses the value of this metric to evaluate the training jobs it launches,\nand returns the training job that results in either the highest or lowest\nvalue for this metric, depending on the value you specify for the Type parameter." properties: metricName: type: "string" @@ -566,7 +566,7 @@ spec: type: "string" type: "object" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -580,11 +580,11 @@ spec: type: "object" type: "array" warmStartConfig: - description: "Specifies the configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job. \n All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. 
If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType value for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job. \n All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job." + description: "Specifies the configuration for starting the hyperparameter tuning job using\none or more previous tuning jobs as a starting point. The results of previous\ntuning jobs are used to inform which combinations of hyperparameters to search\nover in the new tuning job.\n\n\nAll training jobs launched by the new hyperparameter tuning job are evaluated\nby using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM\nas the WarmStartType value for the warm start configuration, the training\njob that performs the best in the new tuning job is compared to the best\ntraining jobs from the parent tuning jobs. From these, the training job that\nperforms the best as measured by the objective metric is returned as the\noverall best training job.\n\n\nAll training jobs launched by parent hyperparameter tuning jobs and the new\nhyperparameter tuning jobs count against the limit of training jobs for the\ntuning job." properties: parentHyperParameterTuningJobs: items: - description: "A previously completed or stopped hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job." + description: "A previously completed or stopped hyperparameter tuning job to be used as\na starting point for a new hyperparameter tuning job." properties: hyperParameterTuningJobName: type: "string" @@ -601,13 +601,13 @@ spec: description: "HyperParameterTuningJobStatus defines the observed state of HyperParameterTuningJob" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -617,7 +617,7 @@ spec: - "region" type: "object" bestTrainingJob: - description: "A TrainingJobSummary object that describes the training job that completed with the best current HyperParameterTuningJobObjective." + description: "A TrainingJobSummary object that describes the training job that completed\nwith the best current HyperParameterTuningJobObjective." properties: creationTime: format: "date-time" @@ -625,7 +625,7 @@ spec: failureReason: type: "string" finalHyperParameterTuningJobObjectiveMetric: - description: "Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig." + description: "Shows the latest objective metric emitted by a training job that was launched\nby a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective\nparameter of HyperParameterTuningJobConfig." properties: metricName: type: "string" @@ -658,9 +658,9 @@ spec: type: "string" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -687,10 +687,10 @@ spec: description: "If the tuning job failed, the reason it failed." type: "string" hyperParameterTuningJobStatus: - description: "The status of the tuning job: InProgress, Completed, Failed, Stopping, or Stopped." + description: "The status of the tuning job: InProgress, Completed, Failed, Stopping, or\nStopped." 
type: "string" overallBestTrainingJob: - description: "If the hyperparameter tuning job is an warm start tuning job with a WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training job with the best objective metric value of all training jobs launched by this tuning job and all parent jobs specified for the warm start tuning job." + description: "If the hyperparameter tuning job is an warm start tuning job with a WarmStartType\nof IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training\njob with the best objective metric value of all training jobs launched by\nthis tuning job and all parent jobs specified for the warm start tuning job." properties: creationTime: format: "date-time" @@ -698,7 +698,7 @@ spec: failureReason: type: "string" finalHyperParameterTuningJobObjectiveMetric: - description: "Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig." + description: "Shows the latest objective metric emitted by a training job that was launched\nby a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective\nparameter of HyperParameterTuningJobConfig." properties: metricName: type: "string" diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelbiasjobdefinitions.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelbiasjobdefinitions.yaml index ecc418301..010ccd393 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelbiasjobdefinitions.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelbiasjobdefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "modelbiasjobdefinitions.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "ModelBiasJobDefinition is the Schema for the ModelBiasJobDefinitions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,7 +30,7 @@ spec: description: "ModelBiasJobDefinitionSpec defines the desired state of ModelBiasJobDefinition." properties: jobDefinitionName: - description: "The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account." + description: "The name of the bias job definition. The name must be unique within an Amazon\nWeb Services Region in the Amazon Web Services account." type: "string" jobResources: description: "Identifies the resources to deploy for a monitoring job." @@ -118,7 +118,7 @@ spec: description: "The output object for a monitoring job." properties: s3Output: - description: "Information about where and how you want to store the results of a monitoring job." + description: "Information about where and how you want to store the results of a monitoring\njob." properties: localPath: type: "string" @@ -138,7 +138,7 @@ spec: enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -151,7 +151,7 @@ spec: type: "object" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf." + description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume\nto perform tasks on your behalf." type: "string" stoppingCondition: description: "A time limit for how long the monitoring job is allowed to run before stopping." @@ -161,9 +161,9 @@ spec: type: "integer" type: "object" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." 
items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -183,13 +183,13 @@ spec: description: "ModelBiasJobDefinitionStatus defines the observed state of ModelBiasJobDefinition" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -199,9 +199,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelexplainabilityjobdefinitions.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelexplainabilityjobdefinitions.yaml index 9cf5e6e68..cbe9aae09 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelexplainabilityjobdefinitions.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelexplainabilityjobdefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "modelexplainabilityjobdefinitions.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "ModelExplainabilityJobDefinition is the Schema for the ModelExplainabilityJobDefinitions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,7 +30,7 @@ spec: description: "ModelExplainabilityJobDefinitionSpec defines the desired state of ModelExplainabilityJobDefinition." properties: jobDefinitionName: - description: "The name of the model explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account." + description: "The name of the model explainability job definition. The name must be unique\nwithin an Amazon Web Services Region in the Amazon Web Services account." type: "string" jobResources: description: "Identifies the resources to deploy for a monitoring job." @@ -51,7 +51,7 @@ spec: type: "object" type: "object" modelExplainabilityAppSpecification: - description: "Configures the model explainability job to run a specified Docker container image." + description: "Configures the model explainability job to run a specified Docker container\nimage." properties: configURI: type: "string" @@ -112,7 +112,7 @@ spec: description: "The output object for a monitoring job." properties: s3Output: - description: "Information about where and how you want to store the results of a monitoring job." + description: "Information about where and how you want to store the results of a monitoring\njob." properties: localPath: type: "string" @@ -132,7 +132,7 @@ spec: enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." 
properties: securityGroupIDs: items: @@ -145,7 +145,7 @@ spec: type: "object" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf." + description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume\nto perform tasks on your behalf." type: "string" stoppingCondition: description: "A time limit for how long the monitoring job is allowed to run before stopping." @@ -155,9 +155,9 @@ spec: type: "integer" type: "object" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -177,13 +177,13 @@ spec: description: "ModelExplainabilityJobDefinitionStatus defines the observed state of ModelExplainabilityJobDefinition" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -193,9 +193,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackagegroups.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackagegroups.yaml index d7b145f93..18c88ec46 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackagegroups.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackagegroups.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "modelpackagegroups.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -23,15 +23,15 @@ spec: description: "ModelPackageGroup is the Schema for the ModelPackageGroups API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ModelPackageGroupSpec defines the desired state of ModelPackageGroup. \n A group of versioned models in the model registry." + description: "ModelPackageGroupSpec defines the desired state of ModelPackageGroup.\n\n\nA group of versioned models in the model registry." properties: modelPackageGroupDescription: description: "A description for the model group." @@ -40,9 +40,9 @@ spec: description: "The name of the model group." type: "string" tags: - description: "A list of key value pairs associated with the model group. For more information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon Web Services General Reference Guide." + description: "A list of key value pairs associated with the model group. For more information,\nsee Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)\nin the Amazon Web Services General Reference Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. 
For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -57,13 +57,13 @@ spec: description: "ModelPackageGroupStatus defines the observed state of ModelPackageGroup" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -73,9 +73,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackages.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackages.yaml index 610982b90..7b062ba7b 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackages.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelpackages.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "modelpackages.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -23,20 +23,20 @@ spec: description: "ModelPackage is the Schema for the ModelPackages API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ModelPackageSpec defines the desired state of ModelPackage. \n A versioned model that can be deployed for SageMaker inference." + description: "ModelPackageSpec defines the desired state of ModelPackage.\n\n\nA versioned model that can be deployed for SageMaker inference." 
properties: additionalInferenceSpecifications: - description: "An array of additional Inference Specification objects. Each additional Inference Specification specifies artifacts based on this model package that can be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts." + description: "An array of additional Inference Specification objects. Each additional Inference\nSpecification specifies artifacts based on this model package that can be\nused on inference endpoints. Generally used with SageMaker Neo to store the\ncompiled artifacts." items: - description: "A structure of additional Inference Specification. Additional Inference Specification specifies details about inference jobs that can be run with models based on this model package" + description: "A structure of additional Inference Specification. Additional Inference Specification\nspecifies details about inference jobs that can be run with models based\non this model package" properties: containers: items: @@ -96,7 +96,7 @@ spec: description: "A description for the approval status of the model." type: "string" certifyForMarketplace: - description: "Whether to certify the model package for listing on Amazon Web Services Marketplace. \n This parameter is optional for unversioned models, and does not apply to versioned models." + description: "Whether to certify the model package for listing on Amazon Web Services Marketplace.\n\n\nThis parameter is optional for unversioned models, and does not apply to\nversioned models." type: "boolean" clientToken: description: "A unique token that guarantees that the call to this API is idempotent." @@ -107,13 +107,13 @@ spec: description: "The metadata properties associated with the model package versions." type: "object" domain: - description: "The machine learning domain of your model package and its components. Common machine learning domains include computer vision and natural language processing." + description: "The machine learning domain of your model package and its components. Common\nmachine learning domains include computer vision and natural language processing." type: "string" driftCheckBaselines: - description: "Represents the drift check baselines that can be used when the model monitor is set using the model package. For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection) in the Amazon SageMaker Developer Guide." + description: "Represents the drift check baselines that can be used when the model monitor\nis set using the model package. For more information, see the topic on Drift\nDetection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection)\nin the Amazon SageMaker Developer Guide." properties: bias: - description: "Represents the drift check bias baselines that can be used when the model monitor is set using the model package." + description: "Represents the drift check bias baselines that can be used when the model\nmonitor is set using the model package." properties: configFile: description: "Contains details regarding the file source." 
@@ -147,7 +147,7 @@ spec: type: "object" type: "object" explainability: - description: "Represents the drift check explainability baselines that can be used when the model monitor is set using the model package." + description: "Represents the drift check explainability baselines that can be used when\nthe model monitor is set using the model package." properties: configFile: description: "Contains details regarding the file source." @@ -171,7 +171,7 @@ spec: type: "object" type: "object" modelDataQuality: - description: "Represents the drift check data quality baselines that can be used when the model monitor is set using the model package." + description: "Represents the drift check data quality baselines that can be used when the\nmodel monitor is set using the model package." properties: constraints: description: "Details about the metrics source." @@ -195,7 +195,7 @@ spec: type: "object" type: "object" modelQuality: - description: "Represents the drift check model quality baselines that can be used when the model monitor is set using the model package." + description: "Represents the drift check model quality baselines that can be used when\nthe model monitor is set using the model package." properties: constraints: description: "Details about the metrics source." @@ -220,7 +220,7 @@ spec: type: "object" type: "object" inferenceSpecification: - description: "Specifies details about inference jobs that can be run with models based on this model package, including the following: \n * The Amazon ECR paths of containers that contain the inference code and model artifacts. \n * The instance types that the model package supports for transform jobs and real-time endpoints used for inference. \n * The input and output content formats that the model package supports for inference." + description: "Specifies details about inference jobs that can be run with models based\non this model package, including the following:\n\n\n * The Amazon ECR paths of containers that contain the inference code and\n model artifacts.\n\n\n * The instance types that the model package supports for transform jobs\n and real-time endpoints used for inference.\n\n\n * The input and output content formats that the model package supports\n for inference." properties: containers: items: @@ -284,7 +284,7 @@ spec: type: "string" type: "object" modelApprovalStatus: - description: "Whether the model is approved for deployment. \n This parameter is optional for versioned models, and does not apply to unversioned models. \n For versioned models, the value of this parameter must be set to Approved to deploy the model." + description: "Whether the model is approved for deployment.\n\n\nThis parameter is optional for versioned models, and does not apply to unversioned\nmodels.\n\n\nFor versioned models, the value of this parameter must be set to Approved\nto deploy the model." type: "string" modelMetrics: description: "A structure that contains model metrics reports." @@ -390,20 +390,20 @@ spec: description: "A description of the model package." type: "string" modelPackageGroupName: - description: "The name or Amazon Resource Name (ARN) of the model package group that this model version belongs to. \n This parameter is required for versioned models, and does not apply to unversioned models." + description: "The name or Amazon Resource Name (ARN) of the model package group that this\nmodel version belongs to.\n\n\nThis parameter is required for versioned models, and does not apply to unversioned\nmodels." 
type: "string" modelPackageName: - description: "The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen). \n This parameter is required for unversioned models. It is not applicable to versioned models." + description: "The name of the model package. The name must have 1 to 63 characters. Valid\ncharacters are a-z, A-Z, 0-9, and - (hyphen).\n\n\nThis parameter is required for unversioned models. It is not applicable to\nversioned models." type: "string" samplePayloadURL: - description: "The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpoint.html#API_runtime_InvokeEndpoint_RequestSyntax) call." + description: "The Amazon Simple Storage Service (Amazon S3) path where the sample payload\nis stored. This path must point to a single gzip compressed tar archive (.tar.gz\nsuffix). This archive can hold multiple files that are all equally used in\nthe load test. Each file in the archive must satisfy the size constraints\nof the InvokeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpoint.html#API_runtime_InvokeEndpoint_RequestSyntax)\ncall." type: "string" sourceAlgorithmSpecification: description: "Details about the algorithm that was used to create the model package." properties: sourceAlgorithms: items: - description: "Specifies an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to." + description: "Specifies an algorithm that was used to create the model package. The algorithm\nmust be either an algorithm resource in your SageMaker account or an algorithm\nin Amazon Web Services Marketplace that you are subscribed to." properties: algorithmName: type: "string" @@ -413,9 +413,9 @@ spec: type: "array" type: "object" tags: - description: "A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon Web Services General Reference Guide." + description: "A list of key value pairs associated with the model. For more information,\nsee Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)\nin the Amazon Web Services General Reference Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -424,19 +424,19 @@ spec: type: "object" type: "array" task: - description: "The machine learning task your model package accomplishes. Common machine learning tasks include object detection and image classification. The following tasks are supported by Inference Recommender: \"IMAGE_CLASSIFICATION\" | \"OBJECT_DETECTION\" | \"TEXT_GENERATION\" |\"IMAGE_SEGMENTATION\" | \"FILL_MASK\" | \"CLASSIFICATION\" | \"REGRESSION\" | \"OTHER\". \n Specify \"OTHER\" if none of the tasks listed fit your use case." + description: "The machine learning task your model package accomplishes. Common machine\nlearning tasks include object detection and image classification. The following\ntasks are supported by Inference Recommender: \"IMAGE_CLASSIFICATION\" | \"OBJECT_DETECTION\"\n| \"TEXT_GENERATION\" |\"IMAGE_SEGMENTATION\" | \"FILL_MASK\" | \"CLASSIFICATION\"\n| \"REGRESSION\" | \"OTHER\".\n\n\nSpecify \"OTHER\" if none of the tasks listed fit your use case." type: "string" validationSpecification: - description: "Specifies configurations for one or more transform jobs that SageMaker runs to test the model package." + description: "Specifies configurations for one or more transform jobs that SageMaker runs\nto test the model package." properties: validationProfiles: items: - description: "Contains data, such as the inputs and targeted instance types that are used in the process of validating the model package. \n The data provided in the validation profile is made available to your buyers on Amazon Web Services Marketplace." + description: "Contains data, such as the inputs and targeted instance types that are used\nin the process of validating the model package.\n\n\nThe data provided in the validation profile is made available to your buyers\non Amazon Web Services Marketplace." properties: profileName: type: "string" transformJobDefinition: - description: "Defines the input needed to run a transform job using the inference specification specified in the algorithm." + description: "Defines the input needed to run a transform job using the inference specification\nspecified in the algorithm." properties: batchStrategy: type: "string" @@ -451,7 +451,7 @@ spec: format: "int64" type: "integer" transformInput: - description: "Describes the input source of a transform job and the way the transform job consumes it." 
+ description: "Describes the input source of a transform job and the way the transform job\nconsumes it." properties: compressionType: type: "string" @@ -485,7 +485,7 @@ spec: type: "string" type: "object" transformResources: - description: "Describes the resources, including ML instance types and ML instance count, to use for transform job." + description: "Describes the resources, including ML instance types and ML instance count,\nto use for transform job." properties: instanceCount: format: "int64" @@ -506,13 +506,13 @@ spec: description: "ModelPackageStatus defines the observed state of ModelPackage" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -522,9 +522,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelqualityjobdefinitions.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelqualityjobdefinitions.yaml index 9b7b47eaa..3aa3e6f8f 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelqualityjobdefinitions.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/modelqualityjobdefinitions.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "modelqualityjobdefinitions.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "ModelQualityJobDefinition is the Schema for the ModelQualityJobDefinitions API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -130,7 +130,7 @@ spec: description: "The output object for a monitoring job." properties: s3Output: - description: "Information about where and how you want to store the results of a monitoring job." + description: "Information about where and how you want to store the results of a monitoring\njob." properties: localPath: type: "string" @@ -150,7 +150,7 @@ spec: enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -163,7 +163,7 @@ spec: type: "object" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf." + description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume\nto perform tasks on your behalf." type: "string" stoppingCondition: description: "A time limit for how long the monitoring job is allowed to run before stopping." @@ -173,9 +173,9 @@ spec: type: "integer" type: "object" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." 
properties: key: type: "string" @@ -195,13 +195,13 @@ spec: description: "ModelQualityJobDefinitionStatus defines the observed state of ModelQualityJobDefinition" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -211,9 +211,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/models.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/models.yaml index 7fef096c0..db93d00c8 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/models.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/models.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "models.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -19,15 +19,15 @@ spec: description: "Model is the Schema for the Models API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ModelSpec defines the desired state of Model. \n The properties of a model as returned by the Search API." + description: "ModelSpec defines the desired state of Model.\n\n\nThe properties of a model as returned by the Search API." properties: containers: description: "Specifies the containers in the inference pipeline." @@ -43,12 +43,12 @@ spec: image: type: "string" imageConfig: - description: "Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC)." + description: "Specifies whether the model container is in Amazon ECR or a private Docker\nregistry accessible from your Amazon Virtual Private Cloud (VPC)." properties: repositoryAccessMode: type: "string" repositoryAuthConfig: - description: "Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication." + description: "Specifies an authentication configuration for the private docker registry\nwhere your model image is hosted. 
Specify a value for this property only\nif you specified Vpc as the value for the RepositoryAccessMode field of the\nImageConfig object that you passed to a call to CreateModel and the private\nDocker registry where the model image is hosted requires authentication." properties: repositoryCredentialsProviderARN: type: "string" @@ -71,10 +71,10 @@ spec: type: "object" type: "array" enableNetworkIsolation: - description: "Isolates the model container. No inbound or outbound network calls can be made to or from the model container." + description: "Isolates the model container. No inbound or outbound network calls can be\nmade to or from the model container." type: "boolean" executionRoleARN: - description: "The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). \n To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission." + description: "The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume\nto access model artifacts and docker image for deployment on ML compute instances\nor for batch transform jobs. Deploying on ML compute instances is part of\nmodel hosting. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).\n\n\nTo be able to pass this role to SageMaker, the caller of this API must have\nthe iam:PassRole permission." type: "string" inferenceExecutionConfig: description: "Specifies details of how containers in a multi-container endpoint are called." @@ -86,7 +86,7 @@ spec: description: "The name of the new model." type: "string" primaryContainer: - description: "The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions." + description: "The location of the primary docker image containing inference code, associated\nartifacts, and custom environment map that the inference code uses when the\nmodel is deployed for predictions." properties: containerHostname: type: "string" @@ -97,12 +97,12 @@ spec: image: type: "string" imageConfig: - description: "Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC)." + description: "Specifies whether the model container is in Amazon ECR or a private Docker\nregistry accessible from your Amazon Virtual Private Cloud (VPC)." properties: repositoryAccessMode: type: "string" repositoryAuthConfig: - description: "Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication." + description: "Specifies an authentication configuration for the private docker registry\nwhere your model image is hosted. 
Specify a value for this property only\nif you specified Vpc as the value for the RepositoryAccessMode field of the\nImageConfig object that you passed to a call to CreateModel and the private\nDocker registry where the model image is hosted requires authentication." properties: repositoryCredentialsProviderARN: type: "string" @@ -124,9 +124,9 @@ spec: type: "object" type: "object" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -135,7 +135,7 @@ spec: type: "object" type: "array" vpcConfig: - description: "A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. VpcConfig is used in hosting services and in batch transform. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html)." + description: "A VpcConfig object that specifies the VPC that you want your model to connect\nto. 
Control access to and from your model container by configuring the VPC.\nVpcConfig is used in hosting services and in batch transform. For more information,\nsee Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private\nCloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html)." properties: securityGroupIDs: items: @@ -154,13 +154,13 @@ spec: description: "ModelStatus defines the observed state of Model" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -170,9 +170,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." 
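The hunks above for models.yaml only bump controller-gen from v0.9.2 to v0.14.0 and reflow the generated field descriptions onto multiple lines; the Model schema itself is unchanged. For orientation, a minimal Model custom resource that this CRD would admit might look like the sketch below. The metadata name, account ID, role name and image URI are illustrative placeholders rather than values taken from this patch, and the modelName field name is inferred from the surrounding schema context rather than shown verbatim in the hunks.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: Model
metadata:
  name: example-model                                  # hypothetical resource name
spec:
  modelName: example-model                             # field name inferred from the schema context
  executionRoleARN: arn:aws:iam::111122223333:role/example-sagemaker-role       # placeholder ARN
  enableNetworkIsolation: false
  primaryContainer:
    image: 111122223333.dkr.ecr.us-east-1.amazonaws.com/example-inference:latest   # placeholder image URI
  tags:
    - key: environment
      value: test

Applied to a cluster running the ACK SageMaker controller, a manifest along these lines would be reconciled into a CreateModel call, with sync state reported through the status.ackResourceMetadata and status.conditions members described above.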
diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/monitoringschedules.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/monitoringschedules.yaml index 8c1bd50c2..29386c49a 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/monitoringschedules.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/monitoringschedules.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "monitoringschedules.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,24 +27,24 @@ spec: description: "MonitoringSchedule is the Schema for the MonitoringSchedules API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "MonitoringScheduleSpec defines the desired state of MonitoringSchedule. \n A schedule for a model monitoring job. For information about model monitor, see Amazon SageMaker Model Monitor (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html)." + description: "MonitoringScheduleSpec defines the desired state of MonitoringSchedule.\n\n\nA schedule for a model monitoring job. For information about model monitor,\nsee Amazon SageMaker Model Monitor (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html)." properties: monitoringScheduleConfig: - description: "The configuration object that specifies the monitoring schedule and defines the monitoring job." + description: "The configuration object that specifies the monitoring schedule and defines\nthe monitoring job." properties: monitoringJobDefinition: description: "Defines the monitoring job." properties: baselineConfig: - description: "Configuration for monitoring constraints and monitoring statistics. These baseline resources are compared against the results of the current job from the series of jobs scheduled to collect data periodically." + description: "Configuration for monitoring constraints and monitoring statistics. 
These\nbaseline resources are compared against the results of the current job from\nthe series of jobs scheduled to collect data periodically." properties: baseliningJobName: type: "string" @@ -123,7 +123,7 @@ spec: description: "The output object for a monitoring job." properties: s3Output: - description: "Information about where and how you want to store the results of a monitoring job." + description: "Information about where and how you want to store the results of a monitoring\njob." properties: localPath: type: "string" @@ -154,14 +154,14 @@ spec: type: "object" type: "object" networkConfig: - description: "Networking options for a job, such as network traffic encryption between containers, whether to allow inbound and outbound network calls to and from containers, and the VPC subnets and security groups to use for VPC-enabled jobs." + description: "Networking options for a job, such as network traffic encryption between\ncontainers, whether to allow inbound and outbound network calls to and from\ncontainers, and the VPC subnets and security groups to use for VPC-enabled\njobs." properties: enableInterContainerTrafficEncryption: type: "boolean" enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -195,12 +195,12 @@ spec: type: "object" type: "object" monitoringScheduleName: - description: "The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account." + description: "The name of the monitoring schedule. The name must be unique within an Amazon\nWeb Services Region within an Amazon Web Services account." type: "string" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. 
For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -216,13 +216,13 @@ spec: description: "MonitoringScheduleStatus defines the observed state of MonitoringSchedule" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -232,9 +232,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -262,7 +262,7 @@ spec: format: "date-time" type: "string" failureReason: - description: "A string, up to one KB in size, that contains the reason a monitoring job failed, if it failed." + description: "A string, up to one KB in size, that contains the reason a monitoring job\nfailed, if it failed." type: "string" lastModifiedTime: description: "The time at which the monitoring job was last modified." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstancelifecycleconfigs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstancelifecycleconfigs.yaml index 4cb7c686d..dff137584 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstancelifecycleconfigs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstancelifecycleconfigs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "notebookinstancelifecycleconfigs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -26,10 +26,10 @@ spec: description: "NotebookInstanceLifecycleConfig is the Schema for the NotebookInstanceLifecycleConfigs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -40,18 +40,18 @@ spec: description: "The name of the lifecycle configuration." type: "string" onCreate: - description: "A shell script that runs only once, when you create a notebook instance. The shell script must be a base64-encoded string." + description: "A shell script that runs only once, when you create a notebook instance.\nThe shell script must be a base64-encoded string." items: - description: "Contains the notebook instance lifecycle configuration script. \n Each lifecycle configuration script has a limit of 16384 characters. \n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin. \n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. \n Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started. \n For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." + description: "Contains the notebook instance lifecycle configuration script.\n\n\nEach lifecycle configuration script has a limit of 16384 characters.\n\n\nThe value of the $PATH environment variable that is available to both scripts\nis /sbin:bin:/usr/sbin:/usr/bin.\n\n\nView CloudWatch Logs for notebook instance lifecycle configurations in log\ngroup /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].\n\n\nLifecycle configuration scripts cannot run for longer than 5 minutes. If\na script runs for longer than 5 minutes, it fails and the notebook instance\nis not created or started.\n\n\nFor information about notebook instance lifestyle configurations, see Step\n2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." properties: content: type: "string" type: "object" type: "array" onStart: - description: "A shell script that runs every time you start a notebook instance, including when you create the notebook instance. The shell script must be a base64-encoded string." + description: "A shell script that runs every time you start a notebook instance, including\nwhen you create the notebook instance. The shell script must be a base64-encoded\nstring." items: - description: "Contains the notebook instance lifecycle configuration script. \n Each lifecycle configuration script has a limit of 16384 characters. \n The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin. \n View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. \n Lifecycle configuration scripts cannot run for longer than 5 minutes. 
If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started. \n For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." + description: "Contains the notebook instance lifecycle configuration script.\n\n\nEach lifecycle configuration script has a limit of 16384 characters.\n\n\nThe value of the $PATH environment variable that is available to both scripts\nis /sbin:bin:/usr/sbin:/usr/bin.\n\n\nView CloudWatch Logs for notebook instance lifecycle configurations in log\ngroup /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].\n\n\nLifecycle configuration scripts cannot run for longer than 5 minutes. If\na script runs for longer than 5 minutes, it fails and the notebook instance\nis not created or started.\n\n\nFor information about notebook instance lifestyle configurations, see Step\n2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." properties: content: type: "string" @@ -64,13 +64,13 @@ spec: description: "NotebookInstanceLifecycleConfigStatus defines the observed state of NotebookInstanceLifecycleConfig" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -80,9 +80,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstances.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstances.yaml index 6b6033185..93ff7f631 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstances.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/notebookinstances.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "notebookinstances.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,10 +27,10 @@ spec: description: "NotebookInstance is the Schema for the NotebookInstances API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -38,29 +38,29 @@ spec: description: "NotebookInstanceSpec defines the desired state of NotebookInstance." properties: acceleratorTypes: - description: "A list of Elastic Inference (EI) instance types to associate with this notebook instance. 
Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html)." + description: "A list of Elastic Inference (EI) instance types to associate with this notebook\ninstance. Currently, only one instance type can be associated with a notebook\ninstance. For more information, see Using Elastic Inference in Amazon SageMaker\n(https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html)." items: type: "string" type: "array" additionalCodeRepositories: - description: "An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html)." + description: "An array of up to three Git repositories to associate with the notebook instance.\nThese can be either the names of Git repositories stored as resources in\nyour account, or the URL of Git repositories in Amazon Web Services CodeCommit\n(https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or\nin any other Git repository. These repositories are cloned at the same level\nas the default repository of your notebook instance. For more information,\nsee Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html)." items: type: "string" type: "array" defaultCodeRepository: - description: "A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html)." + description: "A Git repository to associate with the notebook instance as its default code\nrepository. This can be either the name of a Git repository stored as a resource\nin your account, or the URL of a Git repository in Amazon Web Services CodeCommit\n(https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or\nin any other Git repository. When you open a notebook instance, it opens\nin the directory that contains this repository. For more information, see\nAssociating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html)." type: "string" directInternetAccess: - description: "Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. 
\n For more information, see Notebook Instances Are Internet-Enabled by Default (https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access). You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter." + description: "Sets whether SageMaker provides internet access to the notebook instance.\nIf you set this to Disabled this notebook instance is able to access resources\nonly in your VPC, and is not be able to connect to SageMaker training and\nendpoint services unless you configure a NAT Gateway in your VPC.\n\n\nFor more information, see Notebook Instances Are Internet-Enabled by Default\n(https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access).\nYou can set the value of this parameter to Disabled only if you set a value\nfor the SubnetId parameter." type: "string" instanceType: description: "The type of ML compute instance to launch for the notebook instance." type: "string" kmsKeyID: - description: "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the Amazon Web Services Key Management Service Developer Guide." + description: "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service\nkey that SageMaker uses to encrypt data on the storage volume attached to\nyour notebook instance. The KMS key you provide must be enabled. For information,\nsee Enabling and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html)\nin the Amazon Web Services Key Management Service Developer Guide." type: "string" lifecycleConfigName: - description: "The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." + description: "The name of a lifecycle configuration to associate with the notebook instance.\nFor information about lifestyle configurations, see Step 2.1: (Optional)\nCustomize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html)." type: "string" notebookInstanceName: description: "The name of the new notebook instance." @@ -69,23 +69,23 @@ spec: description: "The platform identifier of the notebook instance runtime environment." type: "string" roleARN: - description: "When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). \n To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission." + description: "When you send any requests to Amazon Web Services resources from the notebook\ninstance, SageMaker assumes this role to perform tasks on your behalf. 
You\nmust grant this role necessary permissions so SageMaker can perform these\ntasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com)\npermissions to assume this role. For more information, see SageMaker Roles\n(https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).\n\n\nTo be able to pass this role to SageMaker, the caller of this API must have\nthe iam:PassRole permission." type: "string" rootAccess: - description: "Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. \n Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users." + description: "Whether root access is enabled or disabled for users of the notebook instance.\nThe default value is Enabled.\n\n\nLifecycle configurations need root access to be able to set up a notebook\ninstance. Because of this, lifecycle configurations associated with a notebook\ninstance always run with root access even if you disable root access for\nusers." type: "string" securityGroupIDs: - description: "The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet." + description: "The VPC security group IDs, in the form sg-xxxxxxxx. The security groups\nmust be for the same VPC as specified in the subnet." items: type: "string" type: "array" subnetID: - description: "The ID of the subnet in a VPC to which you would like to have a connectivity from your ML compute instance." + description: "The ID of the subnet in a VPC to which you would like to have a connectivity\nfrom your ML compute instance." type: "string" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." 
+ description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -94,7 +94,7 @@ spec: type: "object" type: "array" volumeSizeInGB: - description: "The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB." + description: "The size, in GB, of the ML storage volume to attach to the notebook instance.\nThe default value is 5 GB." format: "int64" type: "integer" required: @@ -106,13 +106,13 @@ spec: description: "NotebookInstanceStatus defines the observed state of NotebookInstance" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
@@ -122,9 +122,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -154,10 +154,10 @@ spec: description: "The status of the notebook instance." type: "string" stoppedByControllerMetadata: - description: "The URL that you use to connect to the Jupyter notebook that is running in your notebook instance." + description: "The URL that you use to connect to the Jupyter notebook that is running in\nyour notebook instance." type: "string" url: - description: "The URL that you use to connect to the Jupyter notebook that is running in your notebook instance." + description: "The URL that you use to connect to the Jupyter notebook that is running in\nyour notebook instance." type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/processingjobs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/processingjobs.yaml index 5eb6b64fb..fd78812b5 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/processingjobs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/processingjobs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "processingjobs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,15 +27,15 @@ spec: description: "ProcessingJob is the Schema for the ProcessingJobs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "ProcessingJobSpec defines the desired state of ProcessingJob. \n An Amazon SageMaker processing job that is used to analyze data and evaluate models. For more information, see Process Data and Evaluate Models (https://docs.aws.amazon.com/sagemaker/latest/dg/processing-job.html)." + description: "ProcessingJobSpec defines the desired state of ProcessingJob.\n\n\nAn Amazon SageMaker processing job that is used to analyze data and evaluate\nmodels. For more information, see Process Data and Evaluate Models (https://docs.aws.amazon.com/sagemaker/latest/dg/processing-job.html)." properties: appSpecification: description: "Configures the processing job to run a specified Docker container image." @@ -54,10 +54,10 @@ spec: environment: additionalProperties: type: "string" - description: "The environment variables to set in the Docker container. Up to 100 key and values entries in the map are supported." + description: "The environment variables to set in the Docker container. Up to 100 key and\nvalues entries in the map are supported." type: "object" experimentConfig: - description: "Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: \n * CreateProcessingJob \n * CreateTrainingJob \n * CreateTransformJob" + description: "Associates a SageMaker job as a trial component with an experiment and trial.\nSpecified when you call the following APIs:\n\n\n * CreateProcessingJob\n\n\n * CreateTrainingJob\n\n\n * CreateTransformJob" properties: experimentName: type: "string" @@ -67,14 +67,14 @@ spec: type: "string" type: "object" networkConfig: - description: "Networking options for a processing job, such as whether to allow inbound and outbound network calls to and from processing containers, and the VPC subnets and security groups to use for VPC-enabled processing jobs." + description: "Networking options for a processing job, such as whether to allow inbound\nand outbound network calls to and from processing containers, and the VPC\nsubnets and security groups to use for VPC-enabled processing jobs." properties: enableInterContainerTrafficEncryption: type: "boolean" enableNetworkIsolation: type: "boolean" vpcConfig: - description: "Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "Specifies a VPC that your training jobs and hosted models have access to.\nControl access to and from your training and model containers by configuring\nthe VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual\nPrivate Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html)\nand Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -89,12 +89,12 @@ spec: processingInputs: description: "An array of inputs configuring the data to download into the processing container." items: - description: "The inputs for a processing job. The processing input must specify exactly one of either S3Input or DatasetDefinition types." + description: "The inputs for a processing job. The processing input must specify exactly\none of either S3Input or DatasetDefinition types." properties: appManaged: type: "boolean" datasetDefinition: - description: "Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition types." + description: "Configuration for Dataset Definition inputs. The Dataset Definition input\nmust specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition\ntypes." properties: athenaDatasetDefinition: description: "Configuration for Athena Dataset Definition input." @@ -160,7 +160,7 @@ spec: inputName: type: "string" s3Input: - description: "Configuration for downloading input data from Amazon S3 into the processing container." + description: "Configuration for downloading input data from Amazon S3 into the processing\ncontainer." properties: localPath: type: "string" @@ -178,7 +178,7 @@ spec: type: "object" type: "array" processingJobName: - description: "The name of the processing job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account." + description: "The name of the processing job. The name must be unique within an Amazon\nWeb Services Region in the Amazon Web Services account." type: "string" processingOutputConfig: description: "Output configuration for the processing job." @@ -187,7 +187,7 @@ spec: type: "string" outputs: items: - description: "Describes the results of a processing job. The processing output must specify exactly one of either S3Output or FeatureStoreOutput types." + description: "Describes the results of a processing job. The processing output must specify\nexactly one of either S3Output or FeatureStoreOutput types." properties: appManaged: type: "boolean" @@ -200,7 +200,7 @@ spec: outputName: type: "string" s3Output: - description: "Configuration for uploading output data to Amazon S3 from the processing container." + description: "Configuration for uploading output data to Amazon S3 from the processing\ncontainer." properties: localPath: type: "string" @@ -213,7 +213,7 @@ spec: type: "array" type: "object" processingResources: - description: "Identifies the resources, ML compute instances, and ML storage volumes to deploy for a processing job. In distributed training, you specify more than one instance." + description: "Identifies the resources, ML compute instances, and ML storage volumes to\ndeploy for a processing job. In distributed training, you specify more than\none instance." properties: clusterConfig: description: "Configuration for the cluster used to run a processing job." @@ -231,7 +231,7 @@ spec: type: "object" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf." 
+ description: "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume\nto perform tasks on your behalf." type: "string" stoppingCondition: description: "The time limit for how long the processing job is allowed to run." @@ -241,9 +241,9 @@ spec: type: "integer" type: "object" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -261,13 +261,13 @@ spec: description: "ProcessingJobStatus defines the observed state of ProcessingJob" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -277,9 +277,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -303,7 +303,7 @@ spec: type: "object" type: "array" failureReason: - description: "A string, up to one KB in size, that contains the reason a processing job failed, if it failed." + description: "A string, up to one KB in size, that contains the reason a processing job\nfailed, if it failed." type: "string" processingJobStatus: description: "Provides the status of a processing job." 
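To make the ProcessingJob hunks above easier to follow, here is a minimal, hypothetical manifest for the sagemaker.services.k8s.aws/v1alpha1 ProcessingJob resource they describe. It is a sketch only, not part of the patch and not a complete resource (required fields outside the hunks shown, such as the application/container specification, are omitted). Field names that are not visible in this excerpt -- s3URI, instanceCount, instanceType, volumeSizeInGB, and maxRuntimeInSeconds -- are assumptions based on the surrounding schema, and every value (names, ARN, bucket) is a placeholder.

# Illustrative sketch only -- not part of the patch content above or below.
apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: ProcessingJob
metadata:
  name: example-processing-job
spec:
  processingJobName: example-processing-job          # must be unique per AWS Region and account
  roleARN: arn:aws:iam::111122223333:role/example-sagemaker-role   # placeholder ARN
  processingInputs:
    - inputName: input-1
      appManaged: false
      s3Input:
        localPath: /opt/ml/processing/input
        s3URI: s3://example-bucket/input/            # assumed field name, not shown in this excerpt
  processingOutputConfig:
    outputs:
      - outputName: output-1
        appManaged: false
        s3Output:
          localPath: /opt/ml/processing/output
          s3URI: s3://example-bucket/output/         # assumed field name, not shown in this excerpt
  processingResources:
    clusterConfig:                                    # instance settings assumed, not shown in this excerpt
      instanceCount: 1
      instanceType: ml.m5.xlarge
      volumeSizeInGB: 30
  stoppingCondition:
    maxRuntimeInSeconds: 3600                         # assumed field name, not shown in this excerpt
  tags:
    - key: team
      value: data-science

The same layout carries over to the TrainingJob and TransformJob diffs that follow: spec mirrors the corresponding Create* API request, while status.ackResourceMetadata and status.conditions are the common ACK bookkeeping fields.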
diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/trainingjobs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/trainingjobs.yaml index 4904f7b94..e97cf34ea 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/trainingjobs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/trainingjobs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "trainingjobs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -30,18 +30,18 @@ spec: description: "TrainingJob is the Schema for the TrainingJobs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "TrainingJobSpec defines the desired state of TrainingJob. \n Contains information about a training job." + description: "TrainingJobSpec defines the desired state of TrainingJob.\n\n\nContains information about a training job." properties: algorithmSpecification: - description: "The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html)." + description: "The registry path of the Docker image that contains the training algorithm\nand algorithm-specific metadata, including the input mode. For more information\nabout algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\nFor information about providing your own algorithms, see Using Your Own Algorithms\nwith Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html)." 
properties: algorithmName: type: "string" @@ -49,7 +49,7 @@ spec: type: "boolean" metricDefinitions: items: - description: "Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job." + description: "Specifies a metric that the training algorithm writes to stderr or stdout.\nSageMakerhyperparameter tuning captures all defined metrics. You specify\none metric that a hyperparameter tuning job uses as its objective metric\nto choose the best training job." properties: name: type: "string" @@ -60,11 +60,11 @@ spec: trainingImage: type: "string" trainingInputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. \n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. 
SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" type: "object" checkpointConfig: - description: "Contains information about the output location for managed spot training checkpoint data." + description: "Contains information about the output location for managed spot training\ncheckpoint data." properties: localPath: type: "string" @@ -72,11 +72,11 @@ spec: type: "string" type: "object" debugHookConfig: - description: "Configuration information for the Amazon SageMaker Debugger hook parameters, metric and tensor collections, and storage paths. To learn more about how to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html)." + description: "Configuration information for the Amazon SageMaker Debugger hook parameters,\nmetric and tensor collections, and storage paths. To learn more about how\nto configure the DebugHookConfig parameter, see Use the SageMaker and Debugger\nConfiguration API Operations to Create, Update, and Debug Your Training Job\n(https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html)." properties: collectionConfigurations: items: - description: "Configuration information for the Amazon SageMaker Debugger output tensor collections." + description: "Configuration information for the Amazon SageMaker Debugger output tensor\ncollections." properties: collectionName: type: "string" @@ -96,9 +96,9 @@ spec: type: "string" type: "object" debugRuleConfigurations: - description: "Configuration information for Amazon SageMaker Debugger rules for debugging output tensors." + description: "Configuration information for Amazon SageMaker Debugger rules for debugging\noutput tensors." items: - description: "Configuration information for SageMaker Debugger rules for debugging. To learn more about how to configure the DebugRuleConfiguration parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html)." + description: "Configuration information for SageMaker Debugger rules for debugging. To\nlearn more about how to configure the DebugRuleConfiguration parameter, see\nUse the SageMaker and Debugger Configuration API Operations to Create, Update,\nand Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html)." properties: instanceType: type: "string" @@ -120,13 +120,13 @@ spec: type: "object" type: "array" enableInterContainerTrafficEncryption: - description: "To encrypt all communications between ML compute instances in distributed training, choose True. 
Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html)." + description: "To encrypt all communications between ML compute instances in distributed\ntraining, choose True. Encryption provides greater security for distributed\ntraining, but training might take longer. How long it takes depends on the\namount of communication between compute instances, especially if you use\na deep learning algorithm in distributed training. For more information,\nsee Protect Communications Between ML Compute Instances in a Distributed\nTraining Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html)." type: "boolean" enableManagedSpotTraining: - description: "To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. this option is useful when training jobs can be interrupted and when there is flexibility when the training job is run. \n The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed." + description: "To train models using managed spot training, choose True. Managed spot training\nprovides a fully managed and scalable infrastructure for training machine\nlearning models. this option is useful when training jobs can be interrupted\nand when there is flexibility when the training job is run.\n\n\nThe complete and intermediate results of jobs are stored in an Amazon S3\nbucket, and can be used as a starting point to train models incrementally.\nAmazon SageMaker provides metrics and logs in CloudWatch. They can be used\nto see when managed spot training jobs are running, interrupted, resumed,\nor completed." type: "boolean" enableNetworkIsolation: - description: "Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access." + description: "Isolates the training container. No inbound or outbound network calls can\nbe made, except for calls between peers within a training cluster for distributed\ntraining. If you enable network isolation for training jobs that are configured\nto use a VPC, SageMaker downloads and uploads customer data and model artifacts\nthrough the specified VPC, but the training container does not have network\naccess." type: "boolean" environment: additionalProperties: @@ -134,7 +134,7 @@ spec: description: "The environment variables to set in the Docker container." type: "object" experimentConfig: - description: "Associates a SageMaker job as a trial component with an experiment and trial. 
Specified when you call the following APIs: \n * CreateProcessingJob \n * CreateTrainingJob \n * CreateTransformJob" + description: "Associates a SageMaker job as a trial component with an experiment and trial.\nSpecified when you call the following APIs:\n\n\n * CreateProcessingJob\n\n\n * CreateTrainingJob\n\n\n * CreateTransformJob" properties: experimentName: type: "string" @@ -146,10 +146,10 @@ spec: hyperParameters: additionalProperties: type: "string" - description: "Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint. \n Do not include any security-sensitive information including account access IDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive credentials are detected, SageMaker will reject your training job request and return an exception error." + description: "Algorithm-specific parameters that influence the quality of the model. You\nset hyperparameters before you start the learning process. For a list of\nhyperparameters for each training algorithm provided by SageMaker, see Algorithms\n(https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nYou can specify a maximum of 100 hyperparameters. Each hyperparameter is\na key-value pair. Each key and value is limited to 256 characters, as specified\nby the Length Constraint.\n\n\nDo not include any security-sensitive information including account access\nIDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive\ncredentials are detected, SageMaker will reject your training job request\nand return an exception error." type: "object" inputDataConfig: - description: "An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location. \n Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format. \n Depending on the input mode that the algorithm supports, SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files are available as input streams. They do not need to be downloaded." + description: "An array of Channel objects. Each channel is a named input source. InputDataConfig\ndescribes the input data and its location.\n\n\nAlgorithms can accept input data from one or more channels. For example,\nan algorithm might have two channels of input data, training_data and validation_data.\nThe configuration for each channel provides the S3, EFS, or FSx location\nwhere the input data is stored. 
It also provides information about the stored\ndata: the MIME type, compression method, and whether the data is wrapped\nin RecordIO format.\n\n\nDepending on the input mode that the algorithm supports, SageMaker either\ncopies input data files from an S3 bucket to a local directory in the Docker\ncontainer, or makes it available as input streams. For example, if you specify\nan EFS location, input data files are available as input streams. They do\nnot need to be downloaded." items: description: "A channel is a named input source that training algorithms can consume." properties: @@ -194,12 +194,12 @@ spec: type: "object" type: "object" inputMode: - description: "The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). \n Pipe mode \n If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. \n File mode \n If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. \n You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. \n For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. \n FastFile mode \n If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. \n FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided." + description: "The training input mode that the algorithm supports. For more information\nabout input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html).\n\n\nPipe mode\n\n\nIf an algorithm supports Pipe mode, Amazon SageMaker streams data directly\nfrom Amazon S3 to the container.\n\n\nFile mode\n\n\nIf an algorithm supports File mode, SageMaker downloads the training data\nfrom S3 to the provisioned ML storage volume, and mounts the directory to\nthe Docker volume for the training container.\n\n\nYou must provision the ML storage volume with sufficient capacity to accommodate\nthe data downloaded from S3. In addition to the training data, the ML storage\nvolume also stores the output model. The algorithm container uses the ML\nstorage volume to also store intermediate information, if any.\n\n\nFor distributed algorithms, training data is distributed uniformly. Your\ntraining duration is predictable if the input data objects sizes are approximately\nthe same. 
SageMaker does not split the files any further for model training.\nIf the object sizes are skewed, training won't be optimal as the data distribution\nis also skewed when one host in a training cluster is overloaded, thus becoming\na bottleneck in training.\n\n\nFastFile mode\n\n\nIf an algorithm supports FastFile mode, SageMaker streams data directly from\nS3 to the container with no code changes, and provides file system access\nto the data. Users can author their training script to interact with these\nfiles as if they were stored on disk.\n\n\nFastFile mode works best when the data is read sequentially. Augmented manifest\nfiles aren't supported. The startup time is lower when there are fewer files\nin the S3 bucket provided." type: "string" recordWrapperType: type: "string" shuffleConfig: - description: "A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. \n For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch." + description: "A configuration for a shuffle option for input data in a channel. If you\nuse S3Prefix for S3DataType, the results of the S3 key prefix matches are\nshuffled. If you use ManifestFile, the order of the S3 object references\nin the ManifestFile is shuffled. If you use AugmentedManifestFile, the order\nof the JSON lines in the AugmentedManifestFile is shuffled. The shuffling\norder is determined using the Seed value.\n\n\nFor Pipe input mode, when ShuffleConfig is specified shuffling is done at\nthe start of every epoch. With large datasets, this ensures that the order\nof the training data is different for each epoch, and it helps reduce bias\nand possible overfitting. In a multi-node training job when ShuffleConfig\nis combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled\nacross nodes so that the content sent to a particular node on the first epoch\nmight be sent to a different node on the second epoch." properties: seed: format: "int64" @@ -208,7 +208,7 @@ spec: type: "object" type: "array" outputDataConfig: - description: "Specifies the path to the S3 location where you want to store model artifacts. SageMaker creates subfolders for the artifacts." + description: "Specifies the path to the S3 location where you want to store model artifacts.\nSageMaker creates subfolders for the artifacts." properties: kmsKeyID: type: "string" @@ -216,7 +216,7 @@ spec: type: "string" type: "object" profilerConfig: - description: "Configuration information for Amazon SageMaker Debugger system monitoring, framework profiling, and storage paths." + description: "Configuration information for Amazon SageMaker Debugger system monitoring,\nframework profiling, and storage paths." 
properties: profilingIntervalInMilliseconds: format: "int64" @@ -229,7 +229,7 @@ spec: type: "string" type: "object" profilerRuleConfigurations: - description: "Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics." + description: "Configuration information for Amazon SageMaker Debugger rules for profiling\nsystem and framework metrics." items: description: "Configuration information for profiling rules." properties: @@ -253,14 +253,14 @@ spec: type: "object" type: "array" resourceConfig: - description: "The resources, including the ML compute instances and ML storage volumes, to use for model training. \n ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1." + description: "The resources, including the ML compute instances and ML storage volumes,\nto use for model training.\n\n\nML storage volumes store model artifacts and incremental states. Training\nalgorithms might also use ML storage volumes for scratch space. If you want\nSageMaker to use the ML storage volume to store the training data, choose\nFile as the TrainingInputMode in the algorithm specification. For distributed\ntraining algorithms, specify an instance count greater than 1." properties: instanceCount: format: "int64" type: "integer" instanceGroups: items: - description: "Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups ." + description: "Defines an instance group for heterogeneous cluster training. When requesting\na training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html)\nAPI, you can configure multiple instance groups ." properties: instanceCount: format: "int64" @@ -290,10 +290,10 @@ spec: type: "integer" type: "object" roleARN: - description: "The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. \n During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). \n To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission." + description: "The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to\nperform tasks on your behalf.\n\n\nDuring model training, SageMaker needs your permission to read input data\nfrom an S3 bucket, download a Docker image that contains training code, write\nmodel artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and\npublish metrics to Amazon CloudWatch. You grant permissions for all of these\ntasks to an IAM role. 
For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).\n\n\nTo be able to pass this role to SageMaker, the caller of this API must have\nthe iam:PassRole permission." type: "string" stoppingCondition: - description: "Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. \n To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost." + description: "Specifies a limit to how long a model training job can run. It also specifies\nhow long a managed Spot training job has to complete. When the job reaches\nthe time limit, SageMaker ends the training job. Use this API to cap model\ntraining costs.\n\n\nTo stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays\njob termination for 120 seconds. Algorithms can use this 120-second window\nto save the model artifacts, so the results of training are not lost." properties: maxRuntimeInSeconds: format: "int64" @@ -303,9 +303,9 @@ spec: type: "integer" type: "object" tags: - description: "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." + description: "An array of key-value pairs. You can use tags to categorize your Amazon Web\nServices resources in different ways, for example, by purpose, owner, or\nenvironment. For more information, see Tagging Amazon Web Services Resources\n(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. 
For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -314,7 +314,7 @@ spec: type: "object" type: "array" tensorBoardOutputConfig: - description: "Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard output data." + description: "Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard\noutput data." properties: localPath: type: "string" @@ -322,10 +322,10 @@ spec: type: "string" type: "object" trainingJobName: - description: "The name of the training job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account." + description: "The name of the training job. The name must be unique within an Amazon Web\nServices Region in an Amazon Web Services account." type: "string" vpcConfig: - description: "A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." + description: "A VpcConfig object that specifies the VPC that you want your training job\nto connect to. Control access to and from your training container by configuring\nthe VPC. For more information, see Protect Training Jobs by Using an Amazon\nVirtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html)." properties: securityGroupIDs: items: @@ -348,13 +348,13 @@ spec: description: "TrainingJobStatus defines the observed state of TrainingJob" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. 
This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -364,9 +364,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -394,7 +394,7 @@ spec: format: "date-time" type: "string" debugRuleEvaluationStatuses: - description: "Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job." + description: "Evaluation status of Amazon SageMaker Debugger rules for debugging on a training\njob." items: description: "Information about the status of the rule evaluation." properties: @@ -419,13 +419,13 @@ spec: format: "date-time" type: "string" modelArtifacts: - description: "Information about the Amazon S3 location that is configured for storing model artifacts." + description: "Information about the Amazon S3 location that is configured for storing model\nartifacts." properties: s3ModelArtifacts: type: "string" type: "object" profilerRuleEvaluationStatuses: - description: "Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job." + description: "Evaluation status of Amazon SageMaker Debugger rules for profiling on a training\njob." items: description: "Information about the status of the rule evaluation." properties: @@ -446,10 +446,10 @@ spec: description: "Profiling status of a training job." type: "string" secondaryStatus: - description: "Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition. \n SageMaker provides primary statuses and secondary statuses that apply to each of them: \n InProgress \n * Starting - Starting the training job. \n * Downloading - An optional stage for algorithms that support File training input mode. 
It indicates that data is being downloaded to the ML storage volumes. \n * Training - Training is in progress. \n * Interrupted - The job stopped because the managed spot training instances were interrupted. \n * Uploading - Training is complete and the model artifacts are being uploaded to the S3 location. \n Completed \n * Completed - The training job has completed. \n Failed \n * Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse. \n Stopped \n * MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime. \n * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time. \n * Stopped - The training job has stopped. \n Stopping \n * Stopping - Stopping the training job. \n Valid values for SecondaryStatus are subject to change. \n We no longer support the following secondary statuses: \n * LaunchingMLInstances \n * PreparingTraining \n * DownloadingTrainingImage" + description: "Provides detailed information about the state of the training job. For detailed\ninformation on the secondary status of the training job, see StatusMessage\nunder SecondaryStatusTransition.\n\n\nSageMaker provides primary statuses and secondary statuses that apply to\neach of them:\n\n\nInProgress\n\n\n * Starting - Starting the training job.\n\n\n * Downloading - An optional stage for algorithms that support File training\n input mode. It indicates that data is being downloaded to the ML storage\n volumes.\n\n\n * Training - Training is in progress.\n\n\n * Interrupted - The job stopped because the managed spot training instances\n were interrupted.\n\n\n * Uploading - Training is complete and the model artifacts are being uploaded\n to the S3 location.\n\n\nCompleted\n\n\n * Completed - The training job has completed.\n\n\nFailed\n\n\n * Failed - The training job has failed. The reason for the failure is\n returned in the FailureReason field of DescribeTrainingJobResponse.\n\n\nStopped\n\n\n * MaxRuntimeExceeded - The job stopped because it exceeded the maximum\n allowed runtime.\n\n\n * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum\n allowed wait time.\n\n\n * Stopped - The training job has stopped.\n\n\nStopping\n\n\n * Stopping - Stopping the training job.\n\n\nValid values for SecondaryStatus are subject to change.\n\n\nWe no longer support the following secondary statuses:\n\n\n * LaunchingMLInstances\n\n\n * PreparingTraining\n\n\n * DownloadingTrainingImage" type: "string" trainingJobStatus: - description: "The status of the training job. \n SageMaker provides the following training job statuses: \n * InProgress - The training is in progress. \n * Completed - The training job has completed. \n * Failed - The training job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call. \n * Stopping - The training job is stopping. \n * Stopped - The training job has stopped. \n For more detailed information, see SecondaryStatus." + description: "The status of the training job.\n\n\nSageMaker provides the following training job statuses:\n\n\n * InProgress - The training is in progress.\n\n\n * Completed - The training job has completed.\n\n\n * Failed - The training job has failed. 
To see the reason for the failure,\n see the FailureReason field in the response to a DescribeTrainingJobResponse\n call.\n\n\n * Stopping - The training job is stopping.\n\n\n * Stopped - The training job has stopped.\n\n\nFor more detailed information, see SecondaryStatus." type: "string" warmPoolStatus: description: "The status of the warm pool associated with the training job." diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/transformjobs.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/transformjobs.yaml index 10e142e20..953de6415 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/transformjobs.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/transformjobs.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "transformjobs.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -27,21 +27,21 @@ spec: description: "TransformJob is the Schema for the TransformJobs API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" spec: - description: "TransformJobSpec defines the desired state of TransformJob. \n A batch transform job. For information about SageMaker batch transform, see Use Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html)." + description: "TransformJobSpec defines the desired state of TransformJob.\n\n\nA batch transform job. For information about SageMaker batch transform, see\nUse Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html)." properties: batchStrategy: - description: "Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record. \n To enable the batch strategy, you must set the SplitType property to Line, RecordIO, or TFRecord. 
\n To use only one record when making an HTTP invocation request to a container, set BatchStrategy to SingleRecord and SplitType to Line. \n To fit as many records in a mini-batch as can fit within the MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType to Line." + description: "Specifies the number of records to include in a mini-batch for an HTTP inference\nrequest. A record is a single unit of input data that inference can be made\non. For example, a single line in a CSV file is a record.\n\n\nTo enable the batch strategy, you must set the SplitType property to Line,\nRecordIO, or TFRecord.\n\n\nTo use only one record when making an HTTP invocation request to a container,\nset BatchStrategy to SingleRecord and SplitType to Line.\n\n\nTo fit as many records in a mini-batch as can fit within the MaxPayloadInMB\nlimit, set BatchStrategy to MultiRecord and SplitType to Line." type: "string" dataProcessing: - description: "The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html)." + description: "The data structure used to specify the data to be used for inference in a\nbatch transform job and to associate the data that is relevant to the prediction\nresults in the output. The input filter provided allows you to exclude input\ndata that is not needed for inference in a batch transform job. The output\nfilter provided allows you to include input data relevant to interpreting\nthe predictions in the output from the job. For more information, see Associate\nPrediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html)." properties: inputFilter: type: "string" @@ -53,10 +53,10 @@ spec: environment: additionalProperties: type: "string" - description: "The environment variables to set in the Docker container. We support up to 16 key and values entries in the map." + description: "The environment variables to set in the Docker container. We support up to\n16 key and values entries in the map." type: "object" experimentConfig: - description: "Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: \n * CreateProcessingJob \n * CreateTrainingJob \n * CreateTransformJob" + description: "Associates a SageMaker job as a trial component with an experiment and trial.\nSpecified when you call the following APIs:\n\n\n * CreateProcessingJob\n\n\n * CreateTrainingJob\n\n\n * CreateTransformJob" properties: experimentName: type: "string" @@ -66,15 +66,15 @@ spec: type: "string" type: "object" maxConcurrentTransforms: - description: "The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. 
For more information on execution-parameters, see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms." + description: "The maximum number of parallel requests that can be sent to each instance\nin a transform job. If MaxConcurrentTransforms is set to 0 or left unset,\nAmazon SageMaker checks the optional execution-parameters to determine the\nsettings for your chosen algorithm. If the execution-parameters endpoint\nis not enabled, the default value is 1. For more information on execution-parameters,\nsee How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests).\nFor built-in algorithms, you don't need to set a value for MaxConcurrentTransforms." format: "int64" type: "integer" maxPayloadInMB: - description: "The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than, or equal to, the size of a single record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The default value is 6 MB. \n The value of MaxPayloadInMB cannot be greater than 100 MB. If you specify the MaxConcurrentTransforms parameter, the value of (MaxConcurrentTransforms * MaxPayloadInMB) also cannot exceed 100 MB. \n For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support HTTP chunked encoding." + description: "The maximum allowed size of the payload, in MB. A payload is the data portion\nof a record (without metadata). The value in MaxPayloadInMB must be greater\nthan, or equal to, the size of a single record. To estimate the size of a\nrecord in MB, divide the size of your dataset by the number of records. To\nensure that the records fit within the maximum payload size, we recommend\nusing a slightly larger value. The default value is 6 MB.\n\n\nThe value of MaxPayloadInMB cannot be greater than 100 MB. If you specify\nthe MaxConcurrentTransforms parameter, the value of (MaxConcurrentTransforms\n* MaxPayloadInMB) also cannot exceed 100 MB.\n\n\nFor cases where the payload might be arbitrarily large and is transmitted\nusing HTTP chunked encoding, set the value to 0. This feature works only\nin supported algorithms. Currently, Amazon SageMaker built-in algorithms\ndo not support HTTP chunked encoding." format: "int64" type: "integer" modelClientConfig: - description: "Configures the timeout and maximum number of retries for processing a transform job invocation." + description: "Configures the timeout and maximum number of retries for processing a transform\njob invocation." properties: invocationsMaxRetries: format: "int64" @@ -84,12 +84,12 @@ spec: type: "integer" type: "object" modelName: - description: "The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account." + description: "The name of the model that you want to use for the transform job. 
ModelName\nmust be the name of an existing Amazon SageMaker model within an Amazon Web\nServices Region in an Amazon Web Services account." type: "string" tags: - description: "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) in the Amazon Web Services Billing and Cost Management User Guide." + description: "(Optional) An array of key-value pairs. For more information, see Using Cost\nAllocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what)\nin the Amazon Web Services Billing and Cost Management User Guide." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -120,7 +120,7 @@ spec: type: "string" type: "object" transformJobName: - description: "The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account." + description: "The name of the transform job. The name must be unique within an Amazon Web\nServices Region in an Amazon Web Services account." type: "string" transformOutput: description: "Describes the results of the transform job." @@ -135,7 +135,7 @@ spec: type: "string" type: "object" transformResources: - description: "Describes the resources, including ML instance types and ML instance count, to use for the transform job." + description: "Describes the resources, including ML instance types and ML instance count,\nto use for the transform job." 
properties: instanceCount: format: "int64" @@ -156,13 +156,13 @@ spec: description: "TransformJobStatus defines the observed state of TransformJob" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -172,9 +172,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." @@ -198,10 +198,10 @@ spec: type: "object" type: "array" failureReason: - description: "If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html)." 
+ description: "If the transform job failed, FailureReason describes why it failed. A transform\njob creates a log file, which includes error messages, and stores it as an\nAmazon S3 object. For more information, see Log Amazon SageMaker Events with\nAmazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html)." type: "string" transformJobStatus: - description: "The status of the transform job. If the transform job failed, the reason is returned in the FailureReason field." + description: "The status of the transform job. If the transform job failed, the reason\nis returned in the FailureReason field." type: "string" type: "object" type: "object" diff --git a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/userprofiles.yaml b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/userprofiles.yaml index 338c85b25..7fe65a49d 100644 --- a/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/userprofiles.yaml +++ b/crd-catalog/aws-controllers-k8s/sagemaker-controller/sagemaker.services.k8s.aws/v1alpha1/userprofiles.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "userprofiles.sagemaker.services.k8s.aws" spec: group: "sagemaker.services.k8s.aws" @@ -23,10 +23,10 @@ spec: description: "UserProfile is the Schema for the UserProfiles API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -37,15 +37,15 @@ spec: description: "The ID of the associated Domain." type: "string" singleSignOnUserIdentifier: - description: "A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is \"UserName\". If the Domain's AuthMode is IAM Identity Center, this field is required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified." + description: "A specifier for the type of value specified in SingleSignOnUserValue. Currently,\nthe only supported value is \"UserName\". If the Domain's AuthMode is IAM Identity\nCenter, this field is required. 
If the Domain's AuthMode is not IAM Identity\nCenter, this field cannot be specified." type: "string" singleSignOnUserValue: - description: "The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified." + description: "The username of the associated Amazon Web Services Single Sign-On User for\nthis UserProfile. If the Domain's AuthMode is IAM Identity Center, this field\nis required, and must match a valid username of a user in your directory.\nIf the Domain's AuthMode is not IAM Identity Center, this field cannot be\nspecified." type: "string" tags: - description: "Each tag consists of a key and an optional value. Tag keys must be unique per resource. \n Tags that you specify for the User Profile are also added to all Apps that the User Profile launches." + description: "Each tag consists of a key and an optional value. Tag keys must be unique\nper resource.\n\n\nTags that you specify for the User Profile are also added to all Apps that\nthe User Profile launches." items: - description: "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. \n You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. \n For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." + description: "A tag object that consists of a key and an optional value, used to manage\nmetadata for SageMaker Amazon Web Services resources.\n\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning\njobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations,\nand endpoints. For more information on adding tags to SageMaker resources,\nsee AddTags.\n\n\nFor more information on adding metadata to your Amazon Web Services resources\nwith tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html).\nFor advice on best practices for managing Amazon Web Services resources with\ntagging, see Tagging Best Practices: Implement an Effective Amazon Web Services\nResource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf)." properties: key: type: "string" @@ -65,7 +65,7 @@ spec: description: "The JupyterServer app settings." properties: defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -86,7 +86,7 @@ spec: properties: customImages: items: - description: "A custom SageMaker image. 
For more information, see Bring your own SageMaker image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html)." + description: "A custom SageMaker image. For more information, see Bring your own SageMaker\nimage (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html)." properties: appImageConfigName: type: "string" @@ -98,7 +98,7 @@ spec: type: "object" type: "array" defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -115,7 +115,7 @@ spec: type: "array" type: "object" rStudioServerProAppSettings: - description: "A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes." + description: "A collection of settings that configure user interaction with the RStudioServerPro\napp. RStudioServerProAppSettings cannot be updated. The RStudioServerPro\napp must be deleted and a new one created to make any changes." properties: accessStatus: type: "string" @@ -127,7 +127,7 @@ spec: type: "string" type: "array" sharingSettings: - description: "Specifies options for sharing SageMaker Studio notebooks. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed." + description: "Specifies options for sharing SageMaker Studio notebooks. These settings\nare specified as part of DefaultUserSettings when the CreateDomain API is\ncalled, and as part of UserSettings when the CreateUserProfile API is called.\nWhen SharingSettings is not specified, notebook sharing isn't allowed." properties: notebookOutputOption: type: "string" @@ -140,7 +140,7 @@ spec: description: "The TensorBoard app settings." properties: defaultResourceSpec: - description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on." + description: "Specifies the ARN's of a SageMaker image and SageMaker image version, and\nthe instance type that the version runs on." properties: instanceType: type: "string" @@ -161,13 +161,13 @@ spec: description: "UserProfileStatus defines the observed state of UserProfile" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." @@ -177,9 +177,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/activities.yaml b/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/activities.yaml index a4cde1bfa..d504b0d02 100644 --- a/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/activities.yaml +++ b/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/activities.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "activities.sfn.services.k8s.aws" spec: group: "sfn.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "Activity is the Schema for the Activities API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,12 +30,12 @@ spec: description: "ActivitySpec defines the desired state of Activity." properties: name: - description: "The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) in the AWS Step Functions Developer Guide. \n A name must not contain: \n * white space \n * brackets < > { } [ ] \n * wildcard characters ? * \n * special characters \" # % \\ ^ | ~ ` $ & , ; : / \n * control characters (U+0000-001F, U+007F-009F) \n To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _." + description: "The name of the activity to create. This name must be unique for your AWS\naccount and region for 90 days. For more information, see Limits Related\nto State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions)\nin the AWS Step Functions Developer Guide.\n\n\nA name must not contain:\n\n\n * white space\n\n\n * brackets < > { } [ ]\n\n\n * wildcard characters ? *\n\n\n * special characters \" # % \\ ^ | ~ ` $ & , ; : /\n\n\n * control characters (U+0000-001F, U+007F-009F)\n\n\nTo enable logging with CloudWatch Logs, the name should only contain 0-9,\nA-Z, a-z, - and _." type: "string" tags: - description: "The list of tags to add to a resource. \n An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). \n Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @." + description: "The list of tags to add to a resource.\n\n\nAn array of key-value pairs. For more information, see Using Cost Allocation\nTags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)\nin the AWS Billing and Cost Management User Guide, and Controlling Access\nUsing IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html).\n\n\nTags may only contain Unicode letters, digits, white space, or these symbols:\n_ . : / = + - @." 
items: - description: "Tags are key-value pairs that can be associated with Step Functions state machines and activities. \n An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). \n Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @." + description: "Tags are key-value pairs that can be associated with Step Functions state\nmachines and activities.\n\n\nAn array of key-value pairs. For more information, see Using Cost Allocation\nTags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)\nin the AWS Billing and Cost Management User Guide, and Controlling Access\nUsing IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html).\n\n\nTags may only contain Unicode letters, digits, white space, or these symbols:\n_ . : / = + - @." properties: key: type: "string" @@ -50,13 +50,13 @@ spec: description: "ActivityStatus defines the observed state of Activity" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
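To make the Activity schema above concrete, here is a minimal, hypothetical manifest for the sfn-controller; the metadata name, activity name, and tag values are invented for illustration and assume the ACK sfn-controller is installed in the cluster. It is a sketch of how the spec.name and spec.tags fields shown in this hunk would be used, not part of the patch itself.

apiVersion: "sfn.services.k8s.aws/v1alpha1"
kind: "Activity"
metadata:
  name: "order-approval"                # hypothetical Kubernetes object name
spec:
  # Must be unique for the AWS account and region for 90 days; to keep
  # CloudWatch logging happy, stick to 0-9, A-Z, a-z, - and _.
  name: "order-approval-activity"
  tags:
    - key: "team"                       # illustrative cost-allocation tag
      value: "fulfillment"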
@@ -66,9 +66,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/statemachines.yaml b/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/statemachines.yaml index 82b4c6df8..de12fd31c 100644 --- a/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/statemachines.yaml +++ b/crd-catalog/aws-controllers-k8s/sfn-controller/sfn.services.k8s.aws/v1alpha1/statemachines.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.9.2" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "statemachines.sfn.services.k8s.aws" spec: group: "sfn.services.k8s.aws" @@ -19,10 +19,10 @@ spec: description: "StateMachine is the Schema for the StateMachines API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,10 +30,10 @@ spec: description: "StateMachineSpec defines the desired state of StateMachine." properties: definition: - description: "The Amazon States Language definition of the state machine. See Amazon States Language (https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html)." 
+ description: "The Amazon States Language definition of the state machine. See Amazon States\nLanguage (https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html)." type: "string" loggingConfiguration: - description: "Defines what execution history events are logged and where they are logged. \n By default, the level is set to OFF. For more information see Log Levels (https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide." + description: "Defines what execution history events are logged and where they are logged.\n\n\nBy default, the level is set to OFF. For more information see Log Levels\n(https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html)\nin the AWS Step Functions User Guide." properties: destinations: items: @@ -51,15 +51,15 @@ spec: type: "string" type: "object" name: - description: "The name of the state machine. \n A name must not contain: \n * white space \n * brackets < > { } [ ] \n * wildcard characters ? * \n * special characters \" # % \\ ^ | ~ ` $ & , ; : / \n * control characters (U+0000-001F, U+007F-009F) \n To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _." + description: "The name of the state machine.\n\n\nA name must not contain:\n\n\n * white space\n\n\n * brackets < > { } [ ]\n\n\n * wildcard characters ? *\n\n\n * special characters \" # % \\ ^ | ~ ` $ & , ; : /\n\n\n * control characters (U+0000-001F, U+007F-009F)\n\n\nTo enable logging with CloudWatch Logs, the name should only contain 0-9,\nA-Z, a-z, - and _." type: "string" roleARN: description: "The Amazon Resource Name (ARN) of the IAM role to use for this state machine." type: "string" tags: - description: "Tags to be added when creating a state machine. \n An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). \n Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @." + description: "Tags to be added when creating a state machine.\n\n\nAn array of key-value pairs. For more information, see Using Cost Allocation\nTags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)\nin the AWS Billing and Cost Management User Guide, and Controlling Access\nUsing IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html).\n\n\nTags may only contain Unicode letters, digits, white space, or these symbols:\n_ . : / = + - @." items: - description: "Tags are key-value pairs that can be associated with Step Functions state machines and activities. \n An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). \n Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @." + description: "Tags are key-value pairs that can be associated with Step Functions state\nmachines and activities.\n\n\nAn array of key-value pairs. 
For more information, see Using Cost Allocation\nTags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)\nin the AWS Billing and Cost Management User Guide, and Controlling Access\nUsing IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html).\n\n\nTags may only contain Unicode letters, digits, white space, or these symbols:\n_ . : / = + - @." properties: key: type: "string" @@ -74,7 +74,7 @@ spec: type: "boolean" type: "object" type_: - description: "Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created." + description: "Determines whether a Standard or Express state machine is created. The default\nis STANDARD. You cannot update the type of a state machine once it has been\ncreated." type: "string" required: - "definition" @@ -85,13 +85,13 @@ spec: description: "StateMachineStatus defines the observed state of StateMachine" properties: ackResourceMetadata: - description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource" + description: "All CRs managed by ACK have a common `Status.ACKResourceMetadata` member\nthat is used to contain resource sync state, account ownership,\nconstructed ARN for the resource" properties: arn: - description: "ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an \"adopted\" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270" + description: "ARN is the Amazon Resource Name for the resource. This is a\nglobally-unique identifier and is set only by the ACK service controller\nonce the controller has orchestrated the creation of the resource OR\nwhen it has verified that an \"adopted\" resource (a resource where the\nARN annotation was set by the Kubernetes user on the CR) exists and\nmatches the supplied CR's Spec field values.\nTODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse\nhttps://github.com/aws/aws-controllers-k8s/issues/270" type: "string" ownerAccountID: - description: "OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource." + description: "OwnerAccountID is the AWS Account ID of the account that owns the\nbackend AWS service API resource." type: "string" region: description: "Region is the AWS region in which the resource exists or will exist." 
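For the StateMachine schema in this file, a minimal sketch of a manifest might look as follows; the role ARN, names, and Amazon States Language definition are placeholders, and the example assumes a STANDARD workflow managed by the ACK sfn-controller. Note that the type field is exposed as type_ in this CRD, and that it cannot be changed once the state machine exists.

apiVersion: "sfn.services.k8s.aws/v1alpha1"
kind: "StateMachine"
metadata:
  name: "hello-world"                   # hypothetical Kubernetes object name
spec:
  name: "hello-world"                   # same naming restrictions as activities
  roleARN: "arn:aws:iam::111122223333:role/StepFunctionsExecutionRole"   # placeholder ARN
  type_: "STANDARD"                     # STANDARD or EXPRESS; immutable after creation
  # Amazon States Language definition, passed through as a JSON string.
  definition: |
    {
      "StartAt": "Hello",
      "States": {
        "Hello": { "Type": "Pass", "End": true }
      }
    }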
@@ -101,9 +101,9 @@ spec: - "region" type: "object" conditions: - description: "All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource" + description: "All CRS managed by ACK have a common `Status.Conditions` member that\ncontains a collection of `ackv1alpha1.Condition` objects that describe\nthe various terminal states of the CR and its backend AWS service API\nresource" items: - description: "Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource" + description: "Condition is the common struct used by all CRDs managed by ACK service\ncontrollers to indicate terminal states of the CR and its backend AWS\nservice API resource" properties: lastTransitionTime: description: "Last time the condition transitioned from one status to another." diff --git a/crd-catalog/aws/karpenter-provider-aws/karpenter.k8s.aws/v1beta1/ec2nodeclasses.yaml b/crd-catalog/aws/karpenter-provider-aws/karpenter.k8s.aws/v1beta1/ec2nodeclasses.yaml index 51dbaa319..b00572351 100644 --- a/crd-catalog/aws/karpenter-provider-aws/karpenter.k8s.aws/v1beta1/ec2nodeclasses.yaml +++ b/crd-catalog/aws/karpenter-provider-aws/karpenter.k8s.aws/v1beta1/ec2nodeclasses.yaml @@ -76,6 +76,9 @@ spec: rule: "self.all(x, has(x.tags) || has(x.id) || has(x.name))" - message: "'id' is mutually exclusive, cannot be set with a combination of other fields in amiSelectorTerms" rule: "!self.all(x, has(x.id) && (has(x.tags) || has(x.name) || has(x.owner)))" + associatePublicIPAddress: + description: "AssociatePublicIPAddress controls if public IP addresses are assigned to instances that are launched with the nodeclass." + type: "boolean" blockDeviceMappings: description: "BlockDeviceMappings to be applied to provisioned nodes." items: diff --git a/crd-catalog/aws/karpenter-provider-aws/karpenter.sh/v1beta1/nodepools.yaml b/crd-catalog/aws/karpenter-provider-aws/karpenter.sh/v1beta1/nodepools.yaml index 586f396a2..92cdf7c08 100644 --- a/crd-catalog/aws/karpenter-provider-aws/karpenter.sh/v1beta1/nodepools.yaml +++ b/crd-catalog/aws/karpenter-provider-aws/karpenter.sh/v1beta1/nodepools.yaml @@ -72,7 +72,7 @@ spec: type: "array" x-kubernetes-validations: - message: "'schedule' must be set with 'duration'" - rule: "!self.all(x, (has(x.schedule) && !has(x.duration)) || (!has(x.schedule) && has(x.duration)))" + rule: "self.all(x, has(x.schedule) == has(x.duration))" consolidateAfter: description: "ConsolidateAfter is the duration the controller will wait\nbefore attempting to terminate nodes that are underutilized.\nRefer to ConsolidationPolicy for how underutilization is considered." 
pattern: "^(([0-9]+(s|m|h))+)|(Never)$" diff --git a/crd-catalog/backube/snapscheduler/snapscheduler.backube/v1/snapshotschedules.yaml b/crd-catalog/backube/snapscheduler/snapscheduler.backube/v1/snapshotschedules.yaml index d6b4d5ea1..379f1ec9c 100644 --- a/crd-catalog/backube/snapscheduler/snapscheduler.backube/v1/snapshotschedules.yaml +++ b/crd-catalog/backube/snapscheduler/snapscheduler.backube/v1/snapshotschedules.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.13.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "snapshotschedules.snapscheduler.backube" spec: group: "snapscheduler.backube" @@ -35,10 +35,10 @@ spec: description: "SnapshotSchedule defines a schedule for taking automated snapshots of PVC(s)" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -51,16 +51,16 @@ spec: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
items: type: "string" type: "array" @@ -72,7 +72,7 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" @@ -83,7 +83,7 @@ spec: description: "Retention determines how long this schedule's snapshots will be kept." properties: expires: - description: "The length of time (time.Duration) after which a given Snapshot will be deleted." + description: "The length of time (time.Duration) after which a given Snapshot will be\ndeleted." pattern: "^\\d+(h|m|s)$" type: "string" maxCount: @@ -93,7 +93,7 @@ spec: type: "integer" type: "object" schedule: - description: "Schedule is a Cronspec specifying when snapshots should be taken. See https://en.wikipedia.org/wiki/Cron for a description of the format." + description: "Schedule is a Cronspec specifying when snapshots should be taken. See\nhttps://en.wikipedia.org/wiki/Cron for a description of the format." pattern: "^(@(annually|yearly|monthly|weekly|daily|hourly))|((((\\d+,)*\\d+|(\\d+(\\/|-)\\d+)|\\*(\\/\\d+)?)\\s?){5})$" type: "string" snapshotTemplate: @@ -102,7 +102,7 @@ spec: labels: additionalProperties: type: "string" - description: "A list of labels that should be added to each Snapshot created by this schedule." + description: "A list of labels that should be added to each Snapshot created by this\nschedule." type: "object" snapshotClassName: description: "The name of the VolumeSnapshotClass to be used when creating Snapshots." @@ -115,7 +115,7 @@ spec: conditions: description: "Conditions is a list of conditions related to operator reconciliation." items: - description: "Condition represents the state of the operator's reconciliation functionality." + description: "Condition represents the state of the operator's\nreconciliation functionality." properties: lastHeartbeatTime: format: "date-time" diff --git a/crd-catalog/berops/claudie/claudie.io/v1beta1/inputmanifests.yaml b/crd-catalog/berops/claudie/claudie.io/v1beta1/inputmanifests.yaml index f36394761..54a8fca1b 100644 --- a/crd-catalog/berops/claudie/claudie.io/v1beta1/inputmanifests.yaml +++ b/crd-catalog/berops/claudie/claudie.io/v1beta1/inputmanifests.yaml @@ -223,14 +223,13 @@ spec: required: - "name" - "region" - - "zone" type: "object" serverType: description: "Type of the machines in the nodepool. Currently, only AMD64 machines are supported." type: "string" storageDiskSize: - description: "Size of the storage disk on the nodes in the nodepool in GB. The OS disk is created automatically with predefined size of 100GB for kubernetes nodes and 50GB for Loadbalancer nodes." - format: "int64" + description: "Size of the storage disk on the nodes in the nodepool in GB. The OS disk is created automatically with predefined size of 100GB for kubernetes nodes and 50GB for Loadbalancer nodes. The value must be either -1 (no disk is created), or >= 50. If no value is specified, 50 is used." 
+ format: "int32" type: "integer" taints: description: "User defined taints for this nodepool." @@ -346,6 +345,7 @@ spec: - "azure" - "cloudflare" - "hetznerdns" + - "genesiscloud" type: "string" secretRef: description: "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace" diff --git a/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/clusterissuers.yaml b/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/clusterissuers.yaml index 3717cd1f9..da6b51c9d 100644 --- a/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/clusterissuers.yaml +++ b/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/clusterissuers.yaml @@ -1179,11 +1179,6 @@ spec: kubernetes: description: "Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server." properties: - audiences: - description: "TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included." - items: - type: "string" - type: "array" mountPath: description: "The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value \"/v1/auth/kubernetes\" will be used." type: "string" @@ -1205,6 +1200,11 @@ spec: serviceAccountRef: description: "A reference to a service account that will be used to request a bound token (also known as \"projected token\"). Compared to using \"secretRef\", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token." properties: + audiences: + description: "TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included." + items: + type: "string" + type: "array" name: description: "Name of the ServiceAccount used to request a token." type: "string" diff --git a/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/issuers.yaml b/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/issuers.yaml index 5742866f8..8d0412e19 100644 --- a/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/issuers.yaml +++ b/crd-catalog/cert-manager/cert-manager/cert-manager.io/v1/issuers.yaml @@ -1179,11 +1179,6 @@ spec: kubernetes: description: "Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server." properties: - audiences: - description: "TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included." - items: - type: "string" - type: "array" mountPath: description: "The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value \"/v1/auth/kubernetes\" will be used." type: "string" @@ -1205,6 +1200,11 @@ spec: serviceAccountRef: description: "A reference to a service account that will be used to request a bound token (also known as \"projected token\"). Compared to using \"secretRef\", using this field means that you don't rely on statically bound tokens. 
To use this field, you must configure an RBAC rule to let cert-manager request a token." properties: + audiences: + description: "TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included." + items: + type: "string" + type: "array" name: description: "Name of the ServiceAccount used to request a token." type: "string" diff --git a/crd-catalog/cert-manager/trust-manager/trust.cert-manager.io/v1alpha1/bundles.yaml b/crd-catalog/cert-manager/trust-manager/trust.cert-manager.io/v1alpha1/bundles.yaml index 33f15426f..5f0987b72 100644 --- a/crd-catalog/cert-manager/trust-manager/trust.cert-manager.io/v1alpha1/bundles.yaml +++ b/crd-catalog/cert-manager/trust-manager/trust.cert-manager.io/v1alpha1/bundles.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.13.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "bundles.trust.cert-manager.io" spec: group: "trust.cert-manager.io" @@ -35,10 +35,10 @@ spec: openAPIV3Schema: properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -48,33 +48,33 @@ spec: sources: description: "Sources is a set of references to data whose data will sync to the target." items: - description: "BundleSource is the set of sources whose data will be appended and synced to the BundleTarget in all Namespaces." + description: "BundleSource is the set of sources whose data will be appended and synced to\nthe BundleTarget in all Namespaces." properties: configMap: - description: "ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a list of ConfigMap's `data` key using label selector, in the trust Namespace." + description: "ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a\nlist of ConfigMap's `data` key using label selector, in the trust Namespace." properties: key: description: "Key is the key of the entry in the object's `data` field to be used." type: "string" name: - description: "Name is the name of the source object in the trust Namespace. 
This field must be left empty when `selector` is set" + description: "Name is the name of the source object in the trust Namespace.\nThis field must be left empty when `selector` is set" type: "string" selector: - description: "Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set." + description: "Selector is the label selector to use to fetch a list of objects. Must not be set\nwhen `Name` is set." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -86,7 +86,7 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" @@ -97,30 +97,30 @@ spec: description: "InLine is a simple string to append as the source data." type: "string" secret: - description: "Secret is a reference (by name) to a Secret's `data` key, or to a list of Secret's `data` key using label selector, in the trust Namespace." + description: "Secret is a reference (by name) to a Secret's `data` key, or to a\nlist of Secret's `data` key using label selector, in the trust Namespace." properties: key: description: "Key is the key of the entry in the object's `data` field to be used." type: "string" name: - description: "Name is the name of the source object in the trust Namespace. This field must be left empty when `selector` is set" + description: "Name is the name of the source object in the trust Namespace.\nThis field must be left empty when `selector` is set" type: "string" selector: - description: "Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set." + description: "Selector is the label selector to use to fetch a list of objects. 
Must not be set\nwhen `Name` is set." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -132,7 +132,7 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" @@ -140,7 +140,7 @@ spec: - "key" type: "object" useDefaultCAs: - description: "UseDefaultCAs, when true, requests the default CA bundle to be used as a source. Default CAs are available if trust-manager was installed via Helm or was otherwise set up to include a package-injecting init container by using the \"--default-package-location\" flag when starting the trust-manager controller. If default CAs were not configured at start-up, any request to use the default CAs will fail. The version of the default CA package which is used for a Bundle is stored in the defaultCAPackageVersion field of the Bundle's status field." + description: "UseDefaultCAs, when true, requests the default CA bundle to be used as a source.\nDefault CAs are available if trust-manager was installed via Helm\nor was otherwise set up to include a package-injecting init container by using the\n\"--default-package-location\" flag when starting the trust-manager controller.\nIf default CAs were not configured at start-up, any request to use the default\nCAs will fail.\nThe version of the default CA package which is used for a Bundle is stored in the\ndefaultCAPackageVersion field of the Bundle's status field." 
type: "boolean" type: "object" type: "array" @@ -151,7 +151,7 @@ spec: description: "AdditionalFormats specifies any additional formats to write to the target" properties: jks: - description: "JKS requests a JKS-formatted binary trust bundle to be written to the target. The bundle has \"changeit\" as the default password. For more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords" + description: "JKS requests a JKS-formatted binary trust bundle to be written to the target.\nThe bundle has \"changeit\" as the default password.\nFor more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords" properties: key: description: "Key is the key of the entry in the object's `data` field to be used." @@ -166,7 +166,7 @@ spec: - "key" type: "object" pkcs12: - description: "PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target. The bundle is by default created without a password." + description: "PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target.\nThe bundle is by default created without a password." properties: key: description: "Key is the key of the entry in the object's `data` field to be used." @@ -181,7 +181,7 @@ spec: type: "object" type: "object" configMap: - description: "ConfigMap is the target ConfigMap in Namespaces that all Bundle source data will be synced to." + description: "ConfigMap is the target ConfigMap in Namespaces that all Bundle source\ndata will be synced to." properties: key: description: "Key is the key of the entry in the object's `data` field to be used." @@ -190,16 +190,16 @@ spec: - "key" type: "object" namespaceSelector: - description: "NamespaceSelector will, if set, only sync the target resource in Namespaces which match the selector." + description: "NamespaceSelector will, if set, only sync the target resource in\nNamespaces which match the selector." properties: matchLabels: additionalProperties: type: "string" - description: "MatchLabels matches on the set of labels that must be present on a Namespace for the Bundle target to be synced there." + description: "MatchLabels matches on the set of labels that must be present on a\nNamespace for the Bundle target to be synced there." type: "object" type: "object" secret: - description: "Secret is the target Secret that all Bundle source data will be synced to. Using Secrets as targets is only supported if enabled at trust-manager startup. By default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace." + description: "Secret is the target Secret that all Bundle source data will be synced to.\nUsing Secrets as targets is only supported if enabled at trust-manager startup.\nBy default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace." properties: key: description: "Key is the key of the entry in the object's `data` field to be used." @@ -216,25 +216,25 @@ spec: description: "Status of the Bundle. This is set and managed automatically." properties: conditions: - description: "List of status conditions to indicate the status of the Bundle. Known condition types are `Bundle`." + description: "List of status conditions to indicate the status of the Bundle.\nKnown condition types are `Bundle`." items: description: "BundleCondition contains condition information for a Bundle." 
properties: lastTransitionTime: - description: "LastTransitionTime is the timestamp corresponding to the last status change of this condition." + description: "LastTransitionTime is the timestamp corresponding to the last status\nchange of this condition." format: "date-time" type: "string" message: - description: "Message is a human-readable description of the details of the last transition, complementing reason." + description: "Message is a human-readable description of the details of the last\ntransition, complementing reason." maxLength: 32768 type: "string" observedGeneration: - description: "If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Bundle." + description: "If set, this represents the .metadata.generation that the condition was\nset based upon.\nFor instance, if .metadata.generation is currently 12, but the\n.status.condition[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the Bundle." format: "int64" minimum: 0.0 type: "integer" reason: - description: "Reason is a brief machine-readable explanation for the condition's last transition. The value should be a CamelCase string. This field may not be empty." + description: "Reason is a brief machine-readable explanation for the condition's last\ntransition.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -262,7 +262,7 @@ spec: - "type" x-kubernetes-list-type: "map" defaultCAVersion: - description: "DefaultCAPackageVersion, if set and non-empty, indicates the version information which was retrieved when the set of default CAs was requested in the bundle source. This should only be set if useDefaultCAs was set to \"true\" on a source, and will be the same for the same version of a bundle with identical certificates." + description: "DefaultCAPackageVersion, if set and non-empty, indicates the version information\nwhich was retrieved when the set of default CAs was requested in the bundle\nsource. This should only be set if useDefaultCAs was set to \"true\" on a source,\nand will be the same for the same version of a bundle with identical certificates." type: "string" type: "object" required: diff --git a/crd-catalog/cilium/cilium/cilium.io/v2alpha1/ciliumbgppeeringpolicies.yaml b/crd-catalog/cilium/cilium/cilium.io/v2alpha1/ciliumbgppeeringpolicies.yaml index e913a3e12..5ba3aeeef 100644 --- a/crd-catalog/cilium/cilium/cilium.io/v2alpha1/ciliumbgppeeringpolicies.yaml +++ b/crd-catalog/cilium/cilium/cilium.io/v2alpha1/ciliumbgppeeringpolicies.yaml @@ -112,12 +112,33 @@ spec: type: "string" type: "array" standard: - description: "Standard holds a list of \"standard\" 32-bit BGP Communities Attribute (RFC 1997) values." + description: "Standard holds a list of \"standard\" 32-bit BGP Communities Attribute (RFC 1997) values defined as numeric values." items: - description: "BGPStandardCommunity type represents a value of the \"standard\" 32-bit BGP Communities Attribute (RFC 1997) as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon." 
+ description: "BGPStandardCommunity type represents a value of the \"standard\" 32-bit BGP Communities Attribute (RFC 1997) as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon (<0-65535>:<0-65535>). For example, no-export community value is 65553:65281." pattern: "^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$" type: "string" type: "array" + wellKnown: + description: "WellKnown holds a list \"standard\" 32-bit BGP Communities Attribute (RFC 1997) values defined as well-known string aliases to their numeric values." + items: + description: "BGPWellKnownCommunity type represents a value of the \"standard\" 32-bit BGP Communities Attribute (RFC 1997) as a well-known string alias to its numeric value. Allowed values and their mapping to the numeric values: \n internet = 0x00000000 (0:0) planned-shut = 0xffff0000 (65535:0) accept-own = 0xffff0001 (65535:1) route-filter-translated-v4 = 0xffff0002 (65535:2) route-filter-v4 = 0xffff0003 (65535:3) route-filter-translated-v6 = 0xffff0004 (65535:4) route-filter-v6 = 0xffff0005 (65535:5) llgr-stale = 0xffff0006 (65535:6) no-llgr = 0xffff0007 (65535:7) blackhole = 0xffff029a (65535:666) no-export = 0xffffff01\t(65535:65281) no-advertise = 0xffffff02 (65535:65282) no-export-subconfed = 0xffffff03 (65535:65283) no-peer = 0xffffff04 (65535:65284)" + enum: + - "internet" + - "planned-shut" + - "accept-own" + - "route-filter-translated-v4" + - "route-filter-v4" + - "route-filter-translated-v6" + - "route-filter-v6" + - "llgr-stale" + - "no-llgr" + - "blackhole" + - "no-export" + - "no-advertise" + - "no-export-subconfed" + - "no-peer" + type: "string" + type: "array" type: "object" localPreference: description: "LocalPreference defines the preference value advertised in the BGP Local Preference path attribute. As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers (no Local Preference path attribute will be advertised). If nil / not set, the default Local Preference of 100 will be advertised in the Local Preference path attribute for iBGP peers." diff --git a/crd-catalog/cryostatio/cryostat-operator/operator.cryostat.io/v1beta1/cryostats.yaml b/crd-catalog/cryostatio/cryostat-operator/operator.cryostat.io/v1beta1/cryostats.yaml index b45d24df5..c2e538832 100644 --- a/crd-catalog/cryostatio/cryostat-operator/operator.cryostat.io/v1beta1/cryostats.yaml +++ b/crd-catalog/cryostatio/cryostat-operator/operator.cryostat.io/v1beta1/cryostats.yaml @@ -2532,8 +2532,25 @@ spec: description: "Options to configure the Cryostat application's target discovery mechanisms." properties: builtInDiscoveryDisabled: - description: "When true, the Cryostat application will disable the built-in discovery mechanisms. Defaults to false" + description: "When true, the Cryostat application will disable the built-in discovery mechanisms. Defaults to false." type: "boolean" + disableBuiltInPortNames: + description: "When true, the Cryostat application will use the default port name jfr-jmx to look for JMX connectable targets." 
+ type: "boolean" + disableBuiltInPortNumbers: + description: "When true, the Cryostat application will use the default port number 9091 to look for JMX connectable targets." + type: "boolean" + discoveryPortNames: + description: "List of port names that the Cryostat application should look for in order to consider a target as JMX connectable." + items: + type: "string" + type: "array" + discoveryPortNumbers: + description: "List of port numbers that the Cryostat application should look for in order to consider a target as JMX connectable." + items: + format: "int32" + type: "integer" + type: "array" type: "object" trustedCertSecrets: description: "List of TLS certificates to trust when connecting to targets." diff --git a/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/clustersecretstores.yaml b/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/clustersecretstores.yaml index a7b359ca3..05f6a2a89 100644 --- a/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/clustersecretstores.yaml +++ b/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/clustersecretstores.yaml @@ -490,6 +490,45 @@ spec: required: - "vaultUrl" type: "object" + chef: + description: "Chef configures this store to sync secrets with chef server" + properties: + auth: + description: "Auth defines the information necessary to authenticate against chef Server" + properties: + secretRef: + description: "ChefAuthSecretRef holds secret references for chef server login credentials." + properties: + privateKeySecretRef: + description: "SecretKey is the Signing Key in PEM format, used for authentication." + properties: + key: + description: "The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be\ndefaulted, in others it may be required." + type: "string" + name: + description: "The name of the Secret resource being referred to." + type: "string" + namespace: + description: "Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults\nto the namespace of the referent." + type: "string" + type: "object" + required: + - "privateKeySecretRef" + type: "object" + required: + - "secretRef" + type: "object" + serverUrl: + description: "ServerURL is the chef server URL used to connect to. If using orgs you should include your org in the url and terminate the url with a \"/\"" + type: "string" + username: + description: "UserName should be the user ID on the chef server" + type: "string" + required: + - "auth" + - "serverUrl" + - "username" + type: "object" conjur: description: "Conjur configures this store to sync secrets using conjur provider" properties: @@ -1153,6 +1192,41 @@ spec: - "region" - "vault" type: "object" + pulumi: + description: "Pulumi configures this store to sync secrets using the Pulumi provider" + properties: + accessToken: + description: "AccessToken is the access tokens to sign in to the Pulumi Cloud Console." + properties: + secretRef: + description: "SecretRef is a reference to a secret containing the Pulumi API token." + properties: + key: + description: "The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be\ndefaulted, in others it may be required." + type: "string" + name: + description: "The name of the Secret resource being referred to." + type: "string" + namespace: + description: "Namespace of the resource being referred to. 
Ignored if referent is not cluster-scoped. cluster-scoped defaults\nto the namespace of the referent." + type: "string" + type: "object" + type: "object" + apiUrl: + default: "https://api.pulumi.com" + description: "APIURL is the URL of the Pulumi API." + type: "string" + environment: + description: "Environment are YAML documents composed of static key-value pairs, programmatic expressions,\ndynamically retrieved values from supported providers including all major clouds,\nand other Pulumi ESC environments.\nTo create a new environment, visit https://www.pulumi.com/docs/esc/environments/ for more information." + type: "string" + organization: + description: "Organization are a space to collaborate on shared projects and stacks.\nTo create a new organization, visit https://app.pulumi.com/ and click \"New Organization\"." + type: "string" + required: + - "accessToken" + - "environment" + - "organization" + type: "object" scaleway: description: "Scaleway" properties: diff --git a/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/secretstores.yaml b/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/secretstores.yaml index c9725369c..6e0e72f07 100644 --- a/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/secretstores.yaml +++ b/crd-catalog/external-secrets/external-secrets/external-secrets.io/v1beta1/secretstores.yaml @@ -490,6 +490,45 @@ spec: required: - "vaultUrl" type: "object" + chef: + description: "Chef configures this store to sync secrets with chef server" + properties: + auth: + description: "Auth defines the information necessary to authenticate against chef Server" + properties: + secretRef: + description: "ChefAuthSecretRef holds secret references for chef server login credentials." + properties: + privateKeySecretRef: + description: "SecretKey is the Signing Key in PEM format, used for authentication." + properties: + key: + description: "The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be\ndefaulted, in others it may be required." + type: "string" + name: + description: "The name of the Secret resource being referred to." + type: "string" + namespace: + description: "Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults\nto the namespace of the referent." + type: "string" + type: "object" + required: + - "privateKeySecretRef" + type: "object" + required: + - "secretRef" + type: "object" + serverUrl: + description: "ServerURL is the chef server URL used to connect to. If using orgs you should include your org in the url and terminate the url with a \"/\"" + type: "string" + username: + description: "UserName should be the user ID on the chef server" + type: "string" + required: + - "auth" + - "serverUrl" + - "username" + type: "object" conjur: description: "Conjur configures this store to sync secrets using conjur provider" properties: @@ -1153,6 +1192,41 @@ spec: - "region" - "vault" type: "object" + pulumi: + description: "Pulumi configures this store to sync secrets using the Pulumi provider" + properties: + accessToken: + description: "AccessToken is the access tokens to sign in to the Pulumi Cloud Console." + properties: + secretRef: + description: "SecretRef is a reference to a secret containing the Pulumi API token." + properties: + key: + description: "The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be\ndefaulted, in others it may be required." + type: "string" + name: + description: "The name of the Secret resource being referred to." + type: "string" + namespace: + description: "Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults\nto the namespace of the referent." + type: "string" + type: "object" + type: "object" + apiUrl: + default: "https://api.pulumi.com" + description: "APIURL is the URL of the Pulumi API." + type: "string" + environment: + description: "Environment are YAML documents composed of static key-value pairs, programmatic expressions,\ndynamically retrieved values from supported providers including all major clouds,\nand other Pulumi ESC environments.\nTo create a new environment, visit https://www.pulumi.com/docs/esc/environments/ for more information." + type: "string" + organization: + description: "Organization are a space to collaborate on shared projects and stacks.\nTo create a new organization, visit https://app.pulumi.com/ and click \"New Organization\"." + type: "string" + required: + - "accessToken" + - "environment" + - "organization" + type: "object" scaleway: description: "Scaleway" properties: diff --git a/crd-catalog/grafana/loki/loki.grafana.com/v1/lokistacks.yaml b/crd-catalog/grafana/loki/loki.grafana.com/v1/lokistacks.yaml index 32d68fc00..c6581072e 100644 --- a/crd-catalog/grafana/loki/loki.grafana.com/v1/lokistacks.yaml +++ b/crd-catalog/grafana/loki/loki.grafana.com/v1/lokistacks.yaml @@ -2460,6 +2460,13 @@ spec: storage: description: "Storage provides summary of all changes that have occurred to the storage configuration." properties: + credentialMode: + description: "CredentialMode contains the authentication mode used for accessing the object storage." + enum: + - "static" + - "token" + - "managed" + type: "string" schemas: description: "Schemas is a list of schemas which have been applied to the LokiStack." items: diff --git a/crd-catalog/gravitational/teleport/resources.teleport.dev/v5/teleportroles.yaml b/crd-catalog/gravitational/teleport/resources.teleport.dev/v5/teleportroles.yaml index d116ead61..af598b15d 100644 --- a/crd-catalog/gravitational/teleport/resources.teleport.dev/v5/teleportroles.yaml +++ b/crd-catalog/gravitational/teleport/resources.teleport.dev/v5/teleportroles.yaml @@ -412,6 +412,28 @@ spec: type: "string" type: "object" type: "array" + spiffe: + description: "SPIFFE is used to allow or deny access to a role holder to generating a SPIFFE SVID." + items: + properties: + dns_sans: + description: "DNSSANs specifies matchers for the SPIFFE ID DNS SANs. Each requested DNS SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: *.example.com would match foo.example.com" + items: + type: "string" + nullable: true + type: "array" + ip_sans: + description: "IPSANs specifies matchers for the SPIFFE ID IP SANs. Each requested IP SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matchers should be specified using CIDR notation, it supports IPv4 and IPv6. 
Examples: - 10.0.0.0/24 would match 10.0.0.0 to 10.255.255.255 - 10.0.0.42/32 would match only 10.0.0.42" + items: + type: "string" + nullable: true + type: "array" + path: + description: "Path specifies a matcher for the SPIFFE ID path. It should not include the trust domain and should start with a leading slash. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: - /svc/foo/*/bar would match /svc/foo/baz/bar - ^\\/svc\\/foo\\/.*\\/bar$ would match /svc/foo/baz/bar" + type: "string" + type: "object" + nullable: true + type: "array" windows_desktop_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -812,6 +834,28 @@ spec: type: "string" type: "object" type: "array" + spiffe: + description: "SPIFFE is used to allow or deny access to a role holder to generating a SPIFFE SVID." + items: + properties: + dns_sans: + description: "DNSSANs specifies matchers for the SPIFFE ID DNS SANs. Each requested DNS SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: *.example.com would match foo.example.com" + items: + type: "string" + nullable: true + type: "array" + ip_sans: + description: "IPSANs specifies matchers for the SPIFFE ID IP SANs. Each requested IP SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matchers should be specified using CIDR notation, it supports IPv4 and IPv6. Examples: - 10.0.0.0/24 would match 10.0.0.0 to 10.255.255.255 - 10.0.0.42/32 would match only 10.0.0.42" + items: + type: "string" + nullable: true + type: "array" + path: + description: "Path specifies a matcher for the SPIFFE ID path. It should not include the trust domain and should start with a leading slash. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: - /svc/foo/*/bar would match /svc/foo/baz/bar - ^\\/svc\\/foo\\/.*\\/bar$ would match /svc/foo/baz/bar" + type: "string" + type: "object" + nullable: true + type: "array" windows_desktop_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true diff --git a/crd-catalog/gravitational/teleport/resources.teleport.dev/v6/teleportroles.yaml b/crd-catalog/gravitational/teleport/resources.teleport.dev/v6/teleportroles.yaml index 454d61125..85bf1c89a 100644 --- a/crd-catalog/gravitational/teleport/resources.teleport.dev/v6/teleportroles.yaml +++ b/crd-catalog/gravitational/teleport/resources.teleport.dev/v6/teleportroles.yaml @@ -412,6 +412,28 @@ spec: type: "string" type: "object" type: "array" + spiffe: + description: "SPIFFE is used to allow or deny access to a role holder to generating a SPIFFE SVID." + items: + properties: + dns_sans: + description: "DNSSANs specifies matchers for the SPIFFE ID DNS SANs. Each requested DNS SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. 
Example: *.example.com would match foo.example.com" + items: + type: "string" + nullable: true + type: "array" + ip_sans: + description: "IPSANs specifies matchers for the SPIFFE ID IP SANs. Each requested IP SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matchers should be specified using CIDR notation, it supports IPv4 and IPv6. Examples: - 10.0.0.0/24 would match 10.0.0.0 to 10.255.255.255 - 10.0.0.42/32 would match only 10.0.0.42" + items: + type: "string" + nullable: true + type: "array" + path: + description: "Path specifies a matcher for the SPIFFE ID path. It should not include the trust domain and should start with a leading slash. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: - /svc/foo/*/bar would match /svc/foo/baz/bar - ^\\/svc\\/foo\\/.*\\/bar$ would match /svc/foo/baz/bar" + type: "string" + type: "object" + nullable: true + type: "array" windows_desktop_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true @@ -812,6 +834,28 @@ spec: type: "string" type: "object" type: "array" + spiffe: + description: "SPIFFE is used to allow or deny access to a role holder to generating a SPIFFE SVID." + items: + properties: + dns_sans: + description: "DNSSANs specifies matchers for the SPIFFE ID DNS SANs. Each requested DNS SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: *.example.com would match foo.example.com" + items: + type: "string" + nullable: true + type: "array" + ip_sans: + description: "IPSANs specifies matchers for the SPIFFE ID IP SANs. Each requested IP SAN is compared against all matchers configured and if any match, the condition is considered to be met. The matchers should be specified using CIDR notation, it supports IPv4 and IPv6. Examples: - 10.0.0.0/24 would match 10.0.0.0 to 10.255.255.255 - 10.0.0.42/32 would match only 10.0.0.42" + items: + type: "string" + nullable: true + type: "array" + path: + description: "Path specifies a matcher for the SPIFFE ID path. It should not include the trust domain and should start with a leading slash. The matcher by default allows '*' to be used to indicate zero or more of any character. Prepend '^' and append '$' to instead switch to matching using the Go regex syntax. Example: - /svc/foo/*/bar would match /svc/foo/baz/bar - ^\\/svc\\/foo\\/.*\\/bar$ would match /svc/foo/baz/bar" + type: "string" + type: "object" + nullable: true + type: "array" windows_desktop_labels: additionalProperties: x-kubernetes-preserve-unknown-fields: true diff --git a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/agentpools.yaml b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/agentpools.yaml index b0ed8da0a..ac297e85f 100644 --- a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/agentpools.yaml +++ b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/agentpools.yaml @@ -179,7 +179,7 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods." 
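# Illustrative usage sketch (not part of the patch above): a role granting SPIFFE SVID
# issuance via the new allow.spiffe rules documented in the Teleport hunks. The kind,
# apiVersion, and role name are assumptions based on the v6 teleportroles schema.
apiVersion: resources.teleport.dev/v6
kind: TeleportRole
metadata:
  name: spiffe-issuer
spec:
  allow:
    spiffe:
      - path: "/svc/foo/*/bar"       # '*' matches zero or more characters
        dns_sans:
          - "*.example.com"
        ip_sans:
          - "10.0.0.0/24"            # CIDR notation, IPv4 or IPv6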
+ description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." @@ -209,6 +209,18 @@ spec: type: "object" type: "object" x-kubernetes-map-type: "atomic" + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" + mismatchLabelKeys: + description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" namespaceSelector: description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." properties: @@ -266,7 +278,7 @@ spec: description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods." + description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." @@ -296,6 +308,18 @@ spec: type: "object" type: "object" x-kubernetes-map-type: "atomic" + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" + mismatchLabelKeys: + description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" namespaceSelector: description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." properties: @@ -352,7 +376,7 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods." + description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." @@ -382,6 +406,18 @@ spec: type: "object" type: "object" x-kubernetes-map-type: "atomic" + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" + mismatchLabelKeys: + description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. 
Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" namespaceSelector: description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." properties: @@ -439,7 +475,7 @@ spec: description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods." + description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." @@ -469,6 +505,18 @@ spec: type: "object" type: "object" x-kubernetes-map-type: "atomic" + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" + mismatchLabelKeys: + description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + items: + type: "string" + type: "array" + x-kubernetes-list-type: "atomic" namespaceSelector: description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." 
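# Illustrative usage sketch (not part of the patch above): the new matchLabelKeys field on a
# podAffinityTerm, shown as a generic pod-spec fragment. It assumes a cluster with the
# MatchLabelKeysInPodAffinity feature gate enabled; the label values are placeholders.
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            app: tfc-agent
        matchLabelKeys:
          - pod-template-hash        # spreads only replicas from the same rollout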
properties: @@ -706,6 +754,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." + format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -771,6 +829,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." + format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -1080,6 +1148,9 @@ spec: description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" + restartPolicy: + description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + type: "string" securityContext: description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: @@ -1142,7 +1213,7 @@ spec: description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. 
Must only be set if type is \"Localhost\"." + description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." @@ -1160,7 +1231,7 @@ spec: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." @@ -1556,6 +1627,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." + format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -1621,6 +1702,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." + format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -1930,6 +2021,9 @@ spec: description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" + restartPolicy: + description: "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers." + type: "string" securityContext: description: "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." properties: @@ -1992,7 +2086,7 @@ spec: description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\"." + description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." @@ -2010,7 +2104,7 @@ spec: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." @@ -2418,6 +2512,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." 
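# Illustrative usage sketch (not part of the patch above): the new lifecycle sleep handler and
# the restartPolicy "Always" sidecar-style init container documented in this hunk, shown as a
# generic pod-template fragment. Image names are placeholders; the sleep handler and sidecar
# behavior assume a cluster version/feature gates that support them.
initContainers:
  - name: log-shipper
    image: example/log-shipper:latest
    restartPolicy: Always            # keeps running as a sidecar until regular containers finish
containers:
  - name: app
    image: example/app:latest
    lifecycle:
      preStop:
        sleep:
          seconds: 5                 # pause before termination so endpoints can drain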
+ format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -2483,6 +2587,16 @@ spec: required: - "port" type: "object" + sleep: + description: "Sleep represents the duration that the container should sleep before being terminated." + properties: + seconds: + description: "Seconds is the number of seconds to sleep." + format: "int64" + type: "integer" + required: + - "seconds" + type: "object" tcpSocket: description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." properties: @@ -2792,6 +2906,9 @@ spec: description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" + restartPolicy: + description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + type: "string" securityContext: description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: @@ -2854,7 +2971,7 @@ spec: description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\"." + description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. 
Must be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." @@ -2872,7 +2989,7 @@ spec: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." @@ -3111,7 +3228,7 @@ spec: description: "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod." type: "string" resourceClaimTemplateName: - description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). \n An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." type: "string" type: "object" required: @@ -3186,7 +3303,7 @@ spec: description: "The seccomp options to use by the containers in this pod. 
Note that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\"." + description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." @@ -3226,7 +3343,7 @@ spec: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." @@ -3656,21 +3773,6 @@ spec: resources: description: "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: "ResourceClaim references one entry in PodSpec.ResourceClaims." - properties: - name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." 
- type: "string" - required: - - "name" - type: "object" - type: "array" - x-kubernetes-list-map-keys: - - "name" - x-kubernetes-list-type: "map" limits: additionalProperties: anyOf: @@ -3724,6 +3826,9 @@ spec: storageClassName: description: "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" type: "string" + volumeAttributesClassName: + description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." + type: "string" volumeMode: description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec." type: "string" @@ -3978,6 +4083,55 @@ spec: items: description: "Projection that may be projected along with other supported volume types" properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. \n Alpha, gated by the ClusterTrustBundleProjection feature gate. \n ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. \n Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time." + properties: + labelSelector: + description: "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\"." + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + name: + description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector." + type: "string" + optional: + description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles." + type: "boolean" + path: + description: "Relative path from the volume root to write the bundle." + type: "string" + signerName: + description: "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated." + type: "string" + required: + - "path" + type: "object" configMap: description: "configMap information about the configMap data to project" properties: diff --git a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/modules.yaml b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/modules.yaml index 4b45c3c70..0a310b951 100644 --- a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/modules.yaml +++ b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/modules.yaml @@ -125,7 +125,7 @@ spec: description: "Workspace to execute the module." properties: id: - description: "Module Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$" + description: "Module Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$`" pattern: "^ws-[a-zA-Z0-9]+$" type: "string" name: diff --git a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/workspaces.yaml b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/workspaces.yaml index 08eefadfa..b5ddc5531 100644 --- a/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/workspaces.yaml +++ b/crd-catalog/hashicorp/terraform-cloud-operator/app.terraform.io/v1alpha2/workspaces.yaml @@ -37,7 +37,7 @@ spec: description: "Terraform Cloud Agents allow Terraform Cloud to communicate with isolated, private, or on-premises infrastructure. More information: - https://developer.hashicorp.com/terraform/cloud-docs/agents" properties: id: - description: "Agent Pool ID. Must match pattern: ^apool-[a-zA-Z0-9]+$" + description: "Agent Pool ID. Must match pattern: `^apool-[a-zA-Z0-9]+$`" pattern: "^apool-[a-zA-Z0-9]+$" type: "string" name: @@ -188,7 +188,7 @@ spec: - "slack" type: "string" url: - description: "The URL of the notification. Must match pattern: ^https?://.*" + description: "The URL of the notification. Must match pattern: `^https?://.*`" pattern: "^https?://.*" type: "string" required: @@ -205,7 +205,7 @@ spec: description: "Projects let you organize your workspaces into groups. Default: default organization project. 
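# --- Illustrative sketch (not part of the vendored CRD diff above) ---
# A projected volume using the clusterTrustBundle source described above. Selection
# by signerName plus labelSelector is shown; the signer name and label are assumed
# placeholders, and the ClusterTrustBundleProjection feature gate (alpha) must be enabled.
volumes:
  - name: trust-bundle
    projected:
      sources:
        - clusterTrustBundle:
            signerName: example.com/my-signer
            labelSelector:
              matchLabels:
                trust: public
            path: ca-bundle.pem
            optional: true
# --- end sketch ---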
More information: - https://developer.hashicorp.com/terraform/tutorials/cloud/projects" properties: id: - description: "Project ID. Must match pattern: ^prj-[a-zA-Z0-9]+$" + description: "Project ID. Must match pattern: `^prj-[a-zA-Z0-9]+$`" pattern: "^prj-[a-zA-Z0-9]+$" type: "string" name: @@ -226,7 +226,7 @@ spec: description: "ConsumerWorkspace allows access to the state for specific workspaces within the same organization. Only one of the fields `ID` or `Name` is allowed. At least one of the fields `ID` or `Name` is mandatory. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/state#remote-state-access-controls" properties: id: - description: "Consumer Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$" + description: "Consumer Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$`" pattern: "^ws-[a-zA-Z0-9]+$" type: "string" name: @@ -248,7 +248,7 @@ spec: pattern: "^(advisory|mandatory)$" type: "string" id: - description: "Run Task ID. Must match pattern: ^task-[a-zA-Z0-9]+$" + description: "Run Task ID. Must match pattern: `^task-[a-zA-Z0-9]+$`" pattern: "^task-[a-zA-Z0-9]+$" type: "string" name: @@ -269,7 +269,7 @@ spec: description: "RunTrigger allows you to connect this workspace to one or more source workspaces. These connections allow runs to queue automatically in this workspace on successful apply of runs in any of the source workspaces. Only one of the fields `ID` or `Name` is allowed. At least one of the fields `ID` or `Name` is mandatory. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/settings/run-triggers" properties: id: - description: "Source Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$" + description: "Source Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$`" pattern: "^ws-[a-zA-Z0-9]+$" type: "string" name: @@ -283,7 +283,7 @@ spec: description: "SSH key used to clone Terraform modules. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/settings/ssh-keys" properties: id: - description: "SSH key ID. Must match pattern: ^sshkey-[a-zA-Z0-9]+$" + description: "SSH key ID. Must match pattern: `^sshkey-[a-zA-Z0-9]+$`" pattern: "^sshkey-[a-zA-Z0-9]+$" type: "string" name: @@ -343,7 +343,7 @@ spec: description: "Team to grant access. More information: - https://developer.hashicorp.com/terraform/cloud-docs/users-teams-organizations/teams" properties: id: - description: "Team ID. Must match pattern: ^team-[a-zA-Z0-9]+$" + description: "Team ID. Must match pattern: `^team-[a-zA-Z0-9]+$`" pattern: "^team-[a-zA-Z0-9]+$" type: "string" name: @@ -424,7 +424,7 @@ spec: minItems: 1 type: "array" terraformVersion: - description: "The version of Terraform to use for this workspace. If not specified, the latest available version will be used. Must match pattern: ^\\\\d{1}\\\\.\\\\d{1,2}\\\\.\\\\d{1,2}$ More information: - https://www.terraform.io/cloud-docs/workspaces/settings#terraform-version" + description: "The version of Terraform to use for this workspace. If not specified, the latest available version will be used. Must match pattern: `^\\\\d{1}\\\\.\\\\d{1,2}\\\\.\\\\d{1,2}$` More information: - https://www.terraform.io/cloud-docs/workspaces/settings#terraform-version" pattern: "^\\d{1}\\.\\d{1,2}\\.\\d{1,2}$" type: "string" token: @@ -457,11 +457,11 @@ spec: minLength: 1 type: "string" oAuthTokenID: - description: "The VCS Connection (OAuth Connection + Token) to use. 
Must match pattern: ^ot-[a-zA-Z0-9]+$" + description: "The VCS Connection (OAuth Connection + Token) to use. Must match pattern: `^ot-[a-zA-Z0-9]+$`" pattern: "^ot-[a-zA-Z0-9]+$" type: "string" repository: - description: "A reference to your VCS repository in the format / where and refer to the organization and repository in your VCS provider." + description: "A reference to your VCS repository in the format `/` where `` and `` refer to the organization and repository in your VCS provider." minLength: 1 type: "string" type: "object" diff --git a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/hcpvaultsecretsapps.yaml b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/hcpvaultsecretsapps.yaml index 4bda9a98d..25b958b11 100644 --- a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/hcpvaultsecretsapps.yaml +++ b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/hcpvaultsecretsapps.yaml @@ -60,7 +60,6 @@ spec: description: "Transformation provides configuration for transforming the secret data before it is stored in the Destination." properties: excludeRaw: - default: false description: "ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration." type: "boolean" excludes: @@ -73,10 +72,6 @@ spec: items: type: "string" type: "array" - resync: - default: true - description: "Resync the Secret on updates to any configured TransformationRefs." - type: "boolean" templates: additionalProperties: description: "Template provides templating configuration." @@ -128,9 +123,6 @@ spec: - "name" type: "object" type: "array" - required: - - "excludeRaw" - - "resync" type: "object" type: description: "Type of Kubernetes Secret. Requires Create to be set to true. Defaults to Opaque." @@ -173,9 +165,15 @@ spec: status: description: "HCPVaultSecretsAppStatus defines the observed state of HCPVaultSecretsApp" properties: + lastGeneration: + description: "LastGeneration is the Generation of the last reconciled resource." + format: "int64" + type: "integer" secretMAC: description: "SecretMAC used when deciding whether new Vault secret data should be synced. \n The controller will compare the \"new\" HCP Vault Secrets App data to this value using HMAC, if they are different, then the data will be synced to the Destination. \n The SecretMac is also used to detect drift in the Destination Secret's Data. If drift is detected the data will be synced to the Destination." type: "string" + required: + - "lastGeneration" type: "object" type: "object" served: true diff --git a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultdynamicsecrets.yaml b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultdynamicsecrets.yaml index 74b7ef620..1e61f319d 100644 --- a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultdynamicsecrets.yaml +++ b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultdynamicsecrets.yaml @@ -60,7 +60,6 @@ spec: description: "Transformation provides configuration for transforming the secret data before it is stored in the Destination." properties: excludeRaw: - default: false description: "ExcludeRaw data from the destination Secret. 
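# --- Illustrative sketch (not part of the vendored CRD diff above) ---
# A Workspace resource exercising the patterned ID fields documented above
# (apool-*, prj-*, ot-*) and the terraformVersion pattern. All IDs, the
# organization name, and the token Secret reference are assumed placeholders.
apiVersion: app.terraform.io/v1alpha2
kind: Workspace
metadata:
  name: example-workspace
spec:
  organization: example-org            # assumed organization name
  token:
    secretKeyRef:
      name: tfc-operator               # assumed Secret holding the API token
      key: token
  name: example-workspace
  terraformVersion: "1.6.2"            # matches ^\d{1}\.\d{1,2}\.\d{1,2}$
  agentPool:
    id: apool-abc123                   # matches ^apool-[a-zA-Z0-9]+$
  project:
    id: prj-abc123                     # matches ^prj-[a-zA-Z0-9]+$
  versionControl:
    oAuthTokenID: ot-abc123            # matches ^ot-[a-zA-Z0-9]+$
    repository: example-org/example-repo
# --- end sketch ---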
Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration." type: "boolean" excludes: @@ -73,10 +72,6 @@ spec: items: type: "string" type: "array" - resync: - default: true - description: "Resync the Secret on updates to any configured TransformationRefs." - type: "boolean" templates: additionalProperties: description: "Template provides templating configuration." @@ -128,9 +123,6 @@ spec: - "name" type: "object" type: "array" - required: - - "excludeRaw" - - "resync" type: "object" type: description: "Type of Kubernetes Secret. Requires Create to be set to true. Defaults to Opaque." diff --git a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultpkisecrets.yaml b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultpkisecrets.yaml index d4c3058ae..08dc0303f 100644 --- a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultpkisecrets.yaml +++ b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultpkisecrets.yaml @@ -68,7 +68,6 @@ spec: description: "Transformation provides configuration for transforming the secret data before it is stored in the Destination." properties: excludeRaw: - default: false description: "ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration." type: "boolean" excludes: @@ -81,10 +80,6 @@ spec: items: type: "string" type: "array" - resync: - default: true - description: "Resync the Secret on updates to any configured TransformationRefs." - type: "boolean" templates: additionalProperties: description: "Template provides templating configuration." @@ -136,9 +131,6 @@ spec: - "name" type: "object" type: "array" - required: - - "excludeRaw" - - "resync" type: "object" type: description: "Type of Kubernetes Secret. Requires Create to be set to true. Defaults to Opaque." @@ -216,6 +208,11 @@ spec: items: type: "string" type: "array" + userIDs: + description: "User ID (OID 0.9.2342.19200300.100.1.1) Subject values to be placed on the signed certificate." + items: + type: "string" + type: "array" vaultAuthRef: description: "VaultAuthRef to the VaultAuth resource, can be prefixed with a namespace, eg: `namespaceA/vaultAuthRefB`. If no namespace prefix is provided it will default to namespace of the VaultAuth CR. If no value is specified for VaultAuthRef the Operator will default to the `default` VaultAuth, configured in the operator's namespace." type: "string" diff --git a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultstaticsecrets.yaml b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultstaticsecrets.yaml index 20c8526a6..812c92aed 100644 --- a/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultstaticsecrets.yaml +++ b/crd-catalog/hashicorp/vault-secrets-operator/secrets.hashicorp.com/v1beta1/vaultstaticsecrets.yaml @@ -57,7 +57,6 @@ spec: description: "Transformation provides configuration for transforming the secret data before it is stored in the Destination." properties: excludeRaw: - default: false description: "ExcludeRaw data from the destination Secret. 
Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration." type: "boolean" excludes: @@ -70,10 +69,6 @@ spec: items: type: "string" type: "array" - resync: - default: true - description: "Resync the Secret on updates to any configured TransformationRefs." - type: "boolean" templates: additionalProperties: description: "Template provides templating configuration." @@ -125,9 +120,6 @@ spec: - "name" type: "object" type: "array" - required: - - "excludeRaw" - - "resync" type: "object" type: description: "Type of Kubernetes Secret. Requires Create to be set to true. Defaults to Opaque." diff --git a/crd-catalog/hazelcast/hazelcast-platform-operator/hazelcast.com/v1alpha1/managementcenters.yaml b/crd-catalog/hazelcast/hazelcast-platform-operator/hazelcast.com/v1alpha1/managementcenters.yaml index 547f06586..cae861f4d 100644 --- a/crd-catalog/hazelcast/hazelcast-platform-operator/hazelcast.com/v1alpha1/managementcenters.yaml +++ b/crd-catalog/hazelcast/hazelcast-platform-operator/hazelcast.com/v1alpha1/managementcenters.yaml @@ -71,6 +71,10 @@ spec: ingressClassName: description: "IngressClassName of the ingress object." type: "string" + path: + default: "/" + description: "Path of the ingress rule." + type: "string" required: - "hostname" type: "object" diff --git a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/destinationrules.yaml b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/destinationrules.yaml index 7745019b0..466190670 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/destinationrules.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/destinationrules.yaml @@ -568,6 +568,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -616,6 +619,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -1176,6 +1182,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -1224,6 +1233,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." 
+ type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" diff --git a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/gateways.yaml b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/gateways.yaml index ab75ae4e3..c8d50cada 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/gateways.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/gateways.yaml @@ -77,6 +77,9 @@ spec: caCertificates: description: "REQUIRED if mode is `MUTUAL` or `OPTIONAL_MUTUAL`." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented client side certificate." + type: "string" cipherSuites: description: "Optional: If specified, only support the specified cipher list." items: diff --git a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/sidecars.yaml b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/sidecars.yaml index a58d659d7..e8904b559 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1alpha3/sidecars.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1alpha3/sidecars.yaml @@ -246,6 +246,9 @@ spec: caCertificates: description: "REQUIRED if mode is `MUTUAL` or `OPTIONAL_MUTUAL`." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented client side certificate." + type: "string" cipherSuites: description: "Optional: If specified, only support the specified cipher list." items: diff --git a/crd-catalog/istio/istio/networking.istio.io/v1beta1/destinationrules.yaml b/crd-catalog/istio/istio/networking.istio.io/v1beta1/destinationrules.yaml index 6022d27a1..283ff800c 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1beta1/destinationrules.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1beta1/destinationrules.yaml @@ -568,6 +568,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -616,6 +619,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -1176,6 +1182,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" @@ -1224,6 +1233,9 @@ spec: caCertificates: description: "OPTIONAL: The path to the file containing certificate authority certificates to use in verifying a presented server certificate." 
type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented server certificate." + type: "string" clientCertificate: description: "REQUIRED if mode is `MUTUAL`." type: "string" diff --git a/crd-catalog/istio/istio/networking.istio.io/v1beta1/gateways.yaml b/crd-catalog/istio/istio/networking.istio.io/v1beta1/gateways.yaml index 4e75334eb..d9a4ce027 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1beta1/gateways.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1beta1/gateways.yaml @@ -77,6 +77,9 @@ spec: caCertificates: description: "REQUIRED if mode is `MUTUAL` or `OPTIONAL_MUTUAL`." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented client side certificate." + type: "string" cipherSuites: description: "Optional: If specified, only support the specified cipher list." items: diff --git a/crd-catalog/istio/istio/networking.istio.io/v1beta1/sidecars.yaml b/crd-catalog/istio/istio/networking.istio.io/v1beta1/sidecars.yaml index 9ca80221c..b84bab1c7 100644 --- a/crd-catalog/istio/istio/networking.istio.io/v1beta1/sidecars.yaml +++ b/crd-catalog/istio/istio/networking.istio.io/v1beta1/sidecars.yaml @@ -246,6 +246,9 @@ spec: caCertificates: description: "REQUIRED if mode is `MUTUAL` or `OPTIONAL_MUTUAL`." type: "string" + caCrl: + description: "OPTIONAL: The path to the file containing the certificate revocation list (CRL) to use in verifying a presented client side certificate." + type: "string" cipherSuites: description: "Optional: If specified, only support the specified cipher list." items: diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusteridentities.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusteridentities.yaml new file mode 100644 index 000000000..9fa134be1 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusteridentities.yaml @@ -0,0 +1,112 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vsphereclusteridentities.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereClusterIdentity" + listKind: "VSphereClusterIdentityList" + plural: "vsphereclusteridentities" + singular: "vsphereclusteridentity" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereClusterIdentity defines the account to be used for reconciling clusters \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + properties: + allowedNamespaces: + description: "AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. If this object is nil, no namespaces will be allowed" + properties: + selector: + description: "Selector is a standard Kubernetes LabelSelector. A label query over a set of resources." + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + type: "object" + secretName: + description: "SecretName references a Secret inside the controller namespace with the credentials to use" + minLength: 1 + type: "string" + type: "object" + status: + properties: + conditions: + description: "Conditions defines current service state of the VSphereCluster." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. 
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + ready: + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusters.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusters.yaml new file mode 100644 index 000000000..c1072ec6f --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusters.yaml @@ -0,0 +1,325 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vsphereclusters.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereCluster" + listKind: "VSphereClusterList" + plural: "vsphereclusters" + singular: "vspherecluster" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Cluster infrastructure is ready for VSphereMachine" + jsonPath: ".status.ready" + name: "Ready" + type: "string" + - description: "Server is the address of the vSphere endpoint" + jsonPath: ".spec.server" + name: "Server" + type: "string" + - description: "API Endpoint" + jsonPath: ".spec.controlPlaneEndpoint[0]" + name: "ControlPlaneEndpoint" + priority: 1 + type: "string" + - description: "Time duration since creation of Machine" + jsonPath: ".metadata.creationTimestamp" + name: "Age" + type: "date" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereCluster is the Schema for the vsphereclusters API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereClusterSpec defines the desired state of VSphereCluster." + properties: + cloudProviderConfiguration: + description: "CloudProviderConfiguration holds the cluster-wide configuration for the vSphere cloud provider. \n Deprecated: will be removed in v1alpha4." + properties: + disk: + description: "Disk is the vSphere cloud provider's disk configuration." + properties: + scsiControllerType: + description: "SCSIControllerType defines SCSI controller to be used." + type: "string" + type: "object" + global: + description: "Global is the vSphere cloud provider's global configuration." + properties: + apiBindPort: + description: "APIBindPort configures the vSphere cloud controller manager API port. Defaults to 43001." 
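# --- Illustrative sketch (not part of the vendored CRD diff above) ---
# Structural example of a VSphereClusterIdentity as described above. The v1alpha3
# version in this file is deprecated and not served, so the apiVersion below is for
# illustration only; the Secret name and label selector are assumed placeholders.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereClusterIdentity
metadata:
  name: example-identity
spec:
  secretName: example-vsphere-credentials
  allowedNamespaces:
    selector:
      matchLabels:
        vsphere-clusters: allowed
# --- end sketch ---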
+ type: "string" + apiDisable: + description: "APIDisable disables the vSphere cloud controller manager API. Defaults to true." + type: "boolean" + caFile: + description: "CAFile Specifies the path to a CA certificate in PEM format. If not configured, the system's CA certificates will be used." + type: "string" + datacenters: + description: "Datacenters is a CSV string of the datacenters in which VMs are located." + type: "string" + insecure: + description: "Insecure is a flag that disables TLS peer verification." + type: "boolean" + password: + description: "Password is the password used to access a vSphere endpoint." + type: "string" + port: + description: "Port is the port on which the vSphere endpoint is listening. Defaults to 443." + type: "string" + roundTripperCount: + description: "RoundTripperCount specifies the SOAP round tripper count (retries = RoundTripper - 1)" + format: "int32" + type: "integer" + secretName: + description: "SecretName is the name of the Kubernetes secret in which the vSphere credentials are located." + type: "string" + secretNamespace: + description: "SecretNamespace is the namespace for SecretName." + type: "string" + secretsDirectory: + description: "SecretsDirectory is a directory in which secrets may be found. This may used in the event that: 1. It is not desirable to use the K8s API to watch changes to secrets 2. The cloud controller manager is not running in a K8s environment, such as DC/OS. For example, the container storage interface (CSI) is container orcehstrator (CO) agnostic, and should support non-K8s COs. Defaults to /etc/cloud/credentials." + type: "string" + serviceAccount: + description: "ServiceAccount is the Kubernetes service account used to launch the cloud controller manager. Defaults to cloud-controller-manager." + type: "string" + thumbprint: + description: "Thumbprint is the cryptographic thumbprint of the vSphere endpoint's certificate." + type: "string" + username: + description: "Username is the username used to access a vSphere endpoint." + type: "string" + type: "object" + labels: + description: "Labels is the vSphere cloud provider's zone and region configuration." + properties: + region: + description: "Region is the region in which VMs are created/located." + type: "string" + zone: + description: "Zone is the zone in which VMs are created/located." + type: "string" + type: "object" + network: + description: "Network is the vSphere cloud provider's network configuration." + properties: + name: + description: "Name is the name of the network to which VMs are connected." + type: "string" + type: "object" + providerConfig: + description: "CPIProviderConfig contains extra information used to configure the vSphere cloud provider." + properties: + cloud: + properties: + controllerImage: + type: "string" + extraArgs: + additionalProperties: + type: "string" + description: "ExtraArgs passes through extra arguments to the cloud provider. The arguments here are passed to the cloud provider daemonset specification" + type: "object" + type: "object" + storage: + properties: + attacherImage: + type: "string" + controllerImage: + type: "string" + livenessProbeImage: + type: "string" + metadataSyncerImage: + type: "string" + nodeDriverImage: + type: "string" + provisionerImage: + type: "string" + registrarImage: + type: "string" + type: "object" + type: "object" + virtualCenter: + additionalProperties: + description: "CPIVCenterConfig is a vSphere cloud provider's vCenter configuration." 
+ properties: + datacenters: + description: "Datacenters is a CSV string of the datacenters in which VMs are located." + type: "string" + password: + description: "Password is the password used to access a vSphere endpoint." + type: "string" + port: + description: "Port is the port on which the vSphere endpoint is listening. Defaults to 443." + type: "string" + roundTripperCount: + description: "RoundTripperCount specifies the SOAP round tripper count (retries = RoundTripper - 1)" + format: "int32" + type: "integer" + thumbprint: + description: "Thumbprint is the cryptographic thumbprint of the vSphere endpoint's certificate." + type: "string" + username: + description: "Username is the username used to access a vSphere endpoint." + type: "string" + type: "object" + description: "VCenter is a list of vCenter configurations." + type: "object" + workspace: + description: "Workspace is the vSphere cloud provider's workspace configuration." + properties: + datacenter: + description: "Datacenter is the datacenter in which VMs are created/located." + type: "string" + datastore: + description: "Datastore is the datastore in which VMs are created/located." + type: "string" + folder: + description: "Folder is the folder in which VMs are created/located." + type: "string" + resourcePool: + description: "ResourcePool is the resource pool in which VMs are created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere endpoint." + type: "string" + type: "object" + type: "object" + controlPlaneEndpoint: + description: "ControlPlaneEndpoint represents the endpoint used to communicate with the control plane." + properties: + host: + description: "The hostname on which the API server is serving." + type: "string" + port: + description: "The port on which the API server is serving." + format: "int32" + type: "integer" + required: + - "host" + - "port" + type: "object" + identityRef: + description: "IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster." + properties: + kind: + description: "Kind of the identity. Can either be VSphereClusterIdentity or Secret" + enum: + - "VSphereClusterIdentity" + - "Secret" + type: "string" + name: + description: "Name of the identity." + minLength: 1 + type: "string" + required: + - "kind" + - "name" + type: "object" + insecure: + description: "Insecure is a flag that controls whether to validate the vSphere server's certificate. \n Deprecated: will be removed in v1alpha4." + type: "boolean" + loadBalancerRef: + description: "LoadBalancerRef may be used to enable a control plane load balancer for this cluster. When a LoadBalancerRef is provided, the VSphereCluster.Status.Ready field will not be true until the referenced resource is Status.Ready and has a non-empty Status.Address value. \n Deprecated: will be removed in v1alpha4." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). 
This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + server: + description: "Server is the address of the vSphere endpoint." + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When provided, Insecure should not be set to true" + type: "string" + type: "object" + status: + description: "VSphereClusterStatus defines the observed state of VSphereClusterSpec." + properties: + conditions: + description: "Conditions defines current service state of the VSphereCluster." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureDomains: + additionalProperties: + description: "FailureDomainSpec is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across." + properties: + attributes: + additionalProperties: + type: "string" + description: "Attributes is a free form map of attributes an infrastructure provider might use or require." 
+ type: "object" + controlPlane: + description: "ControlPlane determines if this failure domain is suitable for use by control plane machines." + type: "boolean" + type: "object" + description: "FailureDomains is a list of failure domain objects synced from the infrastructure provider." + type: "object" + ready: + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheredeploymentzones.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheredeploymentzones.yaml new file mode 100644 index 000000000..8eb97a577 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheredeploymentzones.yaml @@ -0,0 +1,96 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheredeploymentzones.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereDeploymentZone" + listKind: "VSphereDeploymentZoneList" + plural: "vspheredeploymentzones" + singular: "vspheredeploymentzone" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereDeploymentZone is the Schema for the vspheredeploymentzones API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereDeploymentZoneSpec defines the desired state of VSphereDeploymentZone" + properties: + controlPlane: + description: "ControlPlane determines if this failure domain is suitable for use by control plane machines." + type: "boolean" + failureDomain: + description: "failureDomain is the name of the VSphereFailureDomain used for this VSphereDeploymentZone" + type: "string" + placementConstraint: + description: "PlacementConstraint encapsulates the placement constraints used within this deployment zone." + properties: + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + type: "object" + server: + description: "Server is the address of the vSphere endpoint." + type: "string" + required: + - "placementConstraint" + type: "object" + status: + properties: + conditions: + description: "Conditions defines current service state of the VSphereMachine." 
+ items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + ready: + description: "Ready is true when the VSphereDeploymentZone resource is ready. If set to false, it will be ignored by VSphereClusters" + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherefailuredomains.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherefailuredomains.yaml new file mode 100644 index 000000000..11d282e55 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherefailuredomains.yaml @@ -0,0 +1,123 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspherefailuredomains.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereFailureDomain" + listKind: "VSphereFailureDomainList" + plural: "vspherefailuredomains" + singular: "vspherefailuredomain" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereFailureDomain is the Schema for the vspherefailuredomains API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereFailureDomainSpec defines the desired state of VSphereFailureDomain" + properties: + region: + description: "Region defines the name and type of a region" + properties: + autoConfigure: + description: "AutoConfigure tags the Type which is specified in the Topology" + type: "boolean" + name: + description: "Name is the name of the tag that represents this failure domain" + type: "string" + tagCategory: + description: "TagCategory is the category used for the tag" + type: "string" + type: + description: "Type is the type of failure domain, the current values are \"Datacenter\", \"ComputeCluster\" and \"HostGroup\"" + enum: + - "Datacenter" + - "ComputeCluster" + - "HostGroup" + type: "string" + required: + - "name" + - "tagCategory" + - "type" + type: "object" + topology: + description: "Topology is the what describes a given failure domain using vSphere constructs" + properties: + computeCluster: + description: "ComputeCluster as the failure domain" + type: "string" + datacenter: + description: "The underlying infrastructure for this failure domain Datacenter as the failure domain" + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + hosts: + description: "Hosts has information required for placement of machines on VSphere hosts." + properties: + hostGroupName: + description: "HostGroupName is the name of the Host group" + type: "string" + vmGroupName: + description: "VMGroupName is the name of the VM group" + type: "string" + required: + - "hostGroupName" + - "vmGroupName" + type: "object" + networks: + description: "Networks is the list of networks within this failure domain" + items: + type: "string" + type: "array" + required: + - "datacenter" + type: "object" + zone: + description: "Zone defines the name and type of a zone" + properties: + autoConfigure: + description: "AutoConfigure tags the Type which is specified in the Topology" + type: "boolean" + name: + description: "Name is the name of the tag that represents this failure domain" + type: "string" + tagCategory: + description: "TagCategory is the category used for the tag" + type: "string" + type: + description: "Type is the type of failure domain, the current values are \"Datacenter\", \"ComputeCluster\" and \"HostGroup\"" + enum: + - "Datacenter" + - "ComputeCluster" + - "HostGroup" + type: "string" + required: + - "name" + - "tagCategory" + - "type" + type: "object" + required: + - "region" + - "topology" + - "zone" + type: "object" + type: "object" + served: false + storage: false diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachines.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachines.yaml new file mode 100644 index 000000000..00d43ccf2 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachines.yaml @@ -0,0 +1,304 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheremachines.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + 
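# --- Illustrative sketch (not part of the vendored CRD diff above) ---
# A VSphereFailureDomain shaped like the schema above: region and zone tag
# definitions plus a vSphere topology. All names are assumed placeholders, and the
# v1alpha3 version shown in this file is deprecated and not served.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: VSphereFailureDomain
metadata:
  name: example-fd
spec:
  region:
    name: us-west
    tagCategory: k8s-region
    type: Datacenter
  zone:
    name: us-west-1a
    tagCategory: k8s-zone
    type: ComputeCluster
  topology:
    datacenter: dc0
    computeCluster: cluster0
    datastore: datastore0
    networks:
      - VM Network
# --- end sketch ---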
kind: "VSphereMachine" + listKind: "VSphereMachineList" + plural: "vspheremachines" + singular: "vspheremachine" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Cluster to which this VSphereMachine belongs" + jsonPath: ".metadata.labels.cluster\\.x-k8s\\.io/cluster-name" + name: "Cluster" + type: "string" + - description: "Machine ready status" + jsonPath: ".status.ready" + name: "Ready" + type: "string" + - description: "VSphereMachine instance ID" + jsonPath: ".spec.providerID" + name: "ProviderID" + type: "string" + - description: "Machine object which owns this VSphereMachine" + jsonPath: ".metadata.ownerReferences[?(@.kind==\"Machine\")].name" + name: "Machine" + priority: 1 + type: "string" + - description: "Time duration since creation of Machine" + jsonPath: ".metadata.creationTimestamp" + name: "Age" + type: "date" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereMachine is the Schema for the vspheremachines API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereMachineSpec defines the desired state of VSphereMachine" + properties: + cloneMode: + description: "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + failureDomain: + description: "FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone." + type: "string" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." 
+ type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." + type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." + type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." 
+ items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + providerID: + description: "ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc" + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." + minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + status: + description: "VSphereMachineStatus defines the observed state of VSphereMachine" + properties: + addresses: + description: "Addresses contains the VSphere instance associated addresses." + items: + description: "MachineAddress contains information for the node's address." + properties: + address: + description: "The machine address." + type: "string" + type: + description: "Machine address type, one of Hostname, ExternalIP or InternalIP." + type: "string" + required: + - "address" + - "type" + type: "object" + type: "array" + conditions: + description: "Conditions defines current service state of the VSphereMachine." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." 
+ type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: "string" + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: "string" + network: + description: "Network returns the network status for each of the machine's configured network interfaces." + items: + description: "NetworkStatus provides information about one of a VM's networks." + properties: + connected: + description: "Connected is a flag that indicates whether this network is currently connected to the VM." + type: "boolean" + ipAddrs: + description: "IPAddrs is one or more IP addresses reported by vm-tools." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address of the network device." + type: "string" + networkName: + description: "NetworkName is the name of the network." 
+ type: "string" + required: + - "macAddr" + type: "object" + type: "array" + ready: + description: "Ready is true when the provider resource is ready." + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachinetemplates.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachinetemplates.yaml new file mode 100644 index 000000000..dde8585a5 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachinetemplates.yaml @@ -0,0 +1,265 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheremachinetemplates.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereMachineTemplate" + listKind: "VSphereMachineTemplateList" + plural: "vspheremachinetemplates" + singular: "vspheremachinetemplate" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereMachineTemplate is the Schema for the vspheremachinetemplates API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereMachineTemplateSpec defines the desired state of VSphereMachineTemplate" + properties: + template: + description: "VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template" + properties: + metadata: + description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + type: "object" + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. 
\n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency \n Deprecated: This field has no function and is going to be removed in a next release." + type: "string" + labels: + additionalProperties: + type: "string" + description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + type: "object" + name: + description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names \n Deprecated: This field has no function and is going to be removed in a next release." + type: "string" + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces \n Deprecated: This field has no function and is going to be removed in a next release." + type: "string" + ownerReferences: + description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. \n Deprecated: This field has no function and is going to be removed in a next release." + items: + description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + blockOwnerDeletion: + description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned." + type: "boolean" + controller: + description: "If true, this reference points to the managing controller." + type: "boolean" + kind: + description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" + type: "string" + uid: + description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" + type: "string" + required: + - "apiVersion" + - "kind" + - "name" + - "uid" + type: "object" + x-kubernetes-map-type: "atomic" + type: "array" + type: "object" + spec: + description: "Spec is the specification of the desired behavior of the machine." + properties: + cloneMode: + description: "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + failureDomain: + description: "FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone." + type: "string" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." + type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. 
Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." + type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + providerID: + description: "ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc" + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." 
+ type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." + minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + required: + - "spec" + type: "object" + required: + - "template" + type: "object" + type: "object" + served: false + storage: false diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherevms.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherevms.yaml new file mode 100644 index 000000000..0dcf81259 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherevms.yaml @@ -0,0 +1,307 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspherevms.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereVM" + listKind: "VSphereVMList" + plural: "vspherevms" + singular: "vspherevm" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "VSphereVM is the Schema for the vspherevms API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereVMSpec defines the desired state of VSphereVM." + properties: + biosUUID: + description: "BiosUUID is the VM's BIOS UUID that is assigned at runtime after the VM has been created. This field is required at runtime for other controllers that read this CRD as unstructured data." + type: "string" + bootstrapRef: + description: "BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM." + properties: + apiVersion: + description: "API version of the referent." 
+ type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + cloneMode: + description: "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. 
TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." + type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." + type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." 
+ type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." + minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + status: + description: "VSphereVMStatus defines the observed state of VSphereVM" + properties: + addresses: + description: "Addresses is a list of the VM's IP addresses. This field is required at runtime for other controllers that read this CRD as unstructured data." + items: + type: "string" + type: "array" + cloneMode: + description: "CloneMode is the type of clone operation used to clone this VM. Since LinkedMode is the default but fails gracefully if the source of the clone has no snapshots, this field may be used to determine the actual type of clone operation used to create this VM." + type: "string" + conditions: + description: "Conditions defines current service state of the VSphereVM." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." 
+ type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. \n Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output." + type: "string" + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a succinct value suitable for vm interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. \n Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output." + type: "string" + network: + description: "Network returns the network status for each of the machine's configured network interfaces." + items: + description: "NetworkStatus provides information about one of a VM's networks." + properties: + connected: + description: "Connected is a flag that indicates whether this network is currently connected to the VM." + type: "boolean" + ipAddrs: + description: "IPAddrs is one or more IP addresses reported by vm-tools." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address of the network device." + type: "string" + networkName: + description: "NetworkName is the name of the network." + type: "string" + required: + - "macAddr" + type: "object" + type: "array" + ready: + description: "Ready is true when the provider resource is ready. This field is required at runtime for other controllers that read this CRD as unstructured data." + type: "boolean" + retryAfter: + description: "RetryAfter tracks the time we can retry queueing a task" + format: "date-time" + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which the VM was cloned if LinkedMode is enabled." + type: "string" + taskRef: + description: "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users." 
+ type: "string" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusteridentities.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusteridentities.yaml new file mode 100644 index 000000000..731e24333 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusteridentities.yaml @@ -0,0 +1,112 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vsphereclusteridentities.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereClusterIdentity" + listKind: "VSphereClusterIdentityList" + plural: "vsphereclusteridentities" + singular: "vsphereclusteridentity" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereClusterIdentity defines the account to be used for reconciling clusters \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + properties: + allowedNamespaces: + description: "AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. If this object is nil, no namespaces will be allowed" + properties: + selector: + description: "Selector is a standard Kubernetes LabelSelector. A label query over a set of resources." + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + type: "object" + secretName: + description: "SecretName references a Secret inside the controller namespace with the credentials to use" + minLength: 1 + type: "string" + type: "object" + status: + properties: + conditions: + description: "Conditions defines current service state of the VSphereCluster." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." 
+ type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + ready: + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusters.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusters.yaml new file mode 100644 index 000000000..295b23b3d --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusters.yaml @@ -0,0 +1,145 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vsphereclusters.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereCluster" + listKind: "VSphereClusterList" + plural: "vsphereclusters" + singular: "vspherecluster" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Cluster infrastructure is ready for VSphereMachine" + jsonPath: ".status.ready" + name: "Ready" + type: "string" + - description: "Server is the address of the vSphere endpoint" + jsonPath: ".spec.server" + name: "Server" + type: "string" + - description: "API Endpoint" + jsonPath: ".spec.controlPlaneEndpoint[0]" + name: "ControlPlaneEndpoint" + priority: 1 + type: "string" + - description: "Time duration since creation of Machine" + jsonPath: ".metadata.creationTimestamp" + name: "Age" + type: "date" + deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereCluster is the Schema for the vsphereclusters API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereClusterSpec defines the desired state of VSphereCluster" + properties: + controlPlaneEndpoint: + description: "ControlPlaneEndpoint represents the endpoint used to communicate with the control plane." + properties: + host: + description: "The hostname on which the API server is serving." + type: "string" + port: + description: "The port on which the API server is serving." + format: "int32" + type: "integer" + required: + - "host" + - "port" + type: "object" + identityRef: + description: "IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster." + properties: + kind: + description: "Kind of the identity. Can either be VSphereClusterIdentity or Secret" + enum: + - "VSphereClusterIdentity" + - "Secret" + type: "string" + name: + description: "Name of the identity." 
+ minLength: 1 + type: "string" + required: + - "kind" + - "name" + type: "object" + server: + description: "Server is the address of the vSphere endpoint." + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate" + type: "string" + type: "object" + status: + description: "VSphereClusterStatus defines the observed state of VSphereClusterSpec" + properties: + conditions: + description: "Conditions defines current service state of the VSphereCluster." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureDomains: + additionalProperties: + description: "FailureDomainSpec is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across." + properties: + attributes: + additionalProperties: + type: "string" + description: "Attributes is a free form map of attributes an infrastructure provider might use or require." + type: "object" + controlPlane: + description: "ControlPlane determines if this failure domain is suitable for use by control plane machines." + type: "boolean" + type: "object" + description: "FailureDomains is a list of failure domain objects synced from the infrastructure provider." 
+ type: "object" + ready: + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclustertemplates.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclustertemplates.yaml new file mode 100644 index 000000000..80a50f2eb --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclustertemplates.yaml @@ -0,0 +1,86 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vsphereclustertemplates.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereClusterTemplate" + listKind: "VSphereClusterTemplateList" + plural: "vsphereclustertemplates" + singular: "vsphereclustertemplate" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereClusterTemplate is the Schema for the vsphereclustertemplates API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereClusterTemplateSpec defines the desired state of VSphereClusterTemplate" + properties: + template: + properties: + spec: + description: "VSphereClusterSpec defines the desired state of VSphereCluster" + properties: + controlPlaneEndpoint: + description: "ControlPlaneEndpoint represents the endpoint used to communicate with the control plane." + properties: + host: + description: "The hostname on which the API server is serving." + type: "string" + port: + description: "The port on which the API server is serving." + format: "int32" + type: "integer" + required: + - "host" + - "port" + type: "object" + identityRef: + description: "IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster." + properties: + kind: + description: "Kind of the identity. Can either be VSphereClusterIdentity or Secret" + enum: + - "VSphereClusterIdentity" + - "Secret" + type: "string" + name: + description: "Name of the identity." + minLength: 1 + type: "string" + required: + - "kind" + - "name" + type: "object" + server: + description: "Server is the address of the vSphere endpoint." 
+ type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate" + type: "string" + type: "object" + required: + - "spec" + type: "object" + required: + - "template" + type: "object" + type: "object" + served: false + storage: false diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheredeploymentzones.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheredeploymentzones.yaml new file mode 100644 index 000000000..c0a28116a --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheredeploymentzones.yaml @@ -0,0 +1,96 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheredeploymentzones.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereDeploymentZone" + listKind: "VSphereDeploymentZoneList" + plural: "vspheredeploymentzones" + singular: "vspheredeploymentzone" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereDeploymentZone is the Schema for the vspheredeploymentzones API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereDeploymentZoneSpec defines the desired state of VSphereDeploymentZone" + properties: + controlPlane: + description: "ControlPlane determines if this failure domain is suitable for use by control plane machines." + type: "boolean" + failureDomain: + description: "FailureDomain is the name of the VSphereFailureDomain used for this VSphereDeploymentZone" + type: "string" + placementConstraint: + description: "PlacementConstraint encapsulates the placement constraints used within this deployment zone." + properties: + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + type: "object" + server: + description: "Server is the address of the vSphere endpoint." + type: "string" + required: + - "placementConstraint" + type: "object" + status: + properties: + conditions: + description: "Conditions defines current service state of the VSphereMachine." + items: + description: "Condition defines an observation of a Cluster API resource operational state." 
+ properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + ready: + description: "Ready is true when the VSphereDeploymentZone resource is ready. If set to false, it will be ignored by VSphereClusters" + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherefailuredomains.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherefailuredomains.yaml new file mode 100644 index 000000000..6ca1a26ec --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherefailuredomains.yaml @@ -0,0 +1,123 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspherefailuredomains.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereFailureDomain" + listKind: "VSphereFailureDomainList" + plural: "vspherefailuredomains" + singular: "vspherefailuredomain" + scope: "Cluster" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereFailureDomain is the Schema for the vspherefailuredomains API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereFailureDomainSpec defines the desired state of VSphereFailureDomain" + properties: + region: + description: "Region defines the name and type of a region" + properties: + autoConfigure: + description: "AutoConfigure tags the Type which is specified in the Topology" + type: "boolean" + name: + description: "Name is the name of the tag that represents this failure domain" + type: "string" + tagCategory: + description: "TagCategory is the category used for the tag" + type: "string" + type: + description: "Type is the type of failure domain, the current values are \"Datacenter\", \"ComputeCluster\" and \"HostGroup\"" + enum: + - "Datacenter" + - "ComputeCluster" + - "HostGroup" + type: "string" + required: + - "name" + - "tagCategory" + - "type" + type: "object" + topology: + description: "Topology describes a given failure domain using vSphere constructs" + properties: + computeCluster: + description: "ComputeCluster as the failure domain" + type: "string" + datacenter: + description: "The underlying infrastructure for this failure domain Datacenter as the failure domain" + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + hosts: + description: "Hosts has information required for placement of machines on VSphere hosts." + properties: + hostGroupName: + description: "HostGroupName is the name of the Host group" + type: "string" + vmGroupName: + description: "VMGroupName is the name of the VM group" + type: "string" + required: + - "hostGroupName" + - "vmGroupName" + type: "object" + networks: + description: "Networks is the list of networks within this failure domain" + items: + type: "string" + type: "array" + required: + - "datacenter" + type: "object" + zone: + description: "Zone defines the name and type of a zone" + properties: + autoConfigure: + description: "AutoConfigure tags the Type which is specified in the Topology" + type: "boolean" + name: + description: "Name is the name of the tag that represents this failure domain" + type: "string" + tagCategory: + description: "TagCategory is the category used for the tag" + type: "string" + type: + description: "Type is the type of failure domain, the current values are \"Datacenter\", \"ComputeCluster\" and \"HostGroup\"" + enum: + - "Datacenter" + - "ComputeCluster" + - "HostGroup" + type: "string" + required: + - "name" + - "tagCategory" + - "type" + type: "object" + required: + - "region" + - "topology" + - "zone" + type: "object" + type: "object" + served: false + storage: false diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachines.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachines.yaml new file mode 100644 index 000000000..2fe00113c --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachines.yaml @@ -0,0 +1,304 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheremachines.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: 
"VSphereMachine" + listKind: "VSphereMachineList" + plural: "vspheremachines" + singular: "vspheremachine" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Cluster to which this VSphereMachine belongs" + jsonPath: ".metadata.labels.cluster\\.x-k8s\\.io/cluster-name" + name: "Cluster" + type: "string" + - description: "Machine ready status" + jsonPath: ".status.ready" + name: "Ready" + type: "string" + - description: "VSphereMachine instance ID" + jsonPath: ".spec.providerID" + name: "ProviderID" + type: "string" + - description: "Machine object which owns this VSphereMachine" + jsonPath: ".metadata.ownerReferences[?(@.kind==\"Machine\")].name" + name: "Machine" + priority: 1 + type: "string" + - description: "Time duration since creation of Machine" + jsonPath: ".metadata.creationTimestamp" + name: "Age" + type: "date" + deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereMachine is the Schema for the vspheremachines API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereMachineSpec defines the desired state of VSphereMachine" + properties: + cloneMode: + description: "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + failureDomain: + description: "FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone." + type: "string" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." 
+ type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." + type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." + type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." 
+ items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + providerID: + description: "ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc" + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." + minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + status: + description: "VSphereMachineStatus defines the observed state of VSphereMachine" + properties: + addresses: + description: "Addresses contains the VSphere instance associated addresses." + items: + description: "MachineAddress contains information for the node's address." + properties: + address: + description: "The machine address." + type: "string" + type: + description: "Machine address type, one of Hostname, ExternalIP or InternalIP." + type: "string" + required: + - "address" + - "type" + type: "object" + type: "array" + conditions: + description: "Conditions defines current service state of the VSphereMachine." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." 
+ type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: "string" + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: "string" + network: + description: "Network returns the network status for each of the machine's configured network interfaces." + items: + description: "NetworkStatus provides information about one of a VM's networks." + properties: + connected: + description: "Connected is a flag that indicates whether this network is currently connected to the VM." + type: "boolean" + ipAddrs: + description: "IPAddrs is one or more IP addresses reported by vm-tools." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address of the network device." + type: "string" + networkName: + description: "NetworkName is the name of the network." 
+ type: "string" + required: + - "macAddr" + type: "object" + type: "array" + ready: + description: "Ready is true when the provider resource is ready." + type: "boolean" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachinetemplates.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachinetemplates.yaml new file mode 100644 index 000000000..6a95ea224 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachinetemplates.yaml @@ -0,0 +1,225 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspheremachinetemplates.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereMachineTemplate" + listKind: "VSphereMachineTemplateList" + plural: "vspheremachinetemplates" + singular: "vspheremachinetemplate" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereMachineTemplate is the Schema for the vspheremachinetemplates API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereMachineTemplateSpec defines the desired state of VSphereMachineTemplate" + properties: + template: + description: "VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template" + properties: + metadata: + description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" + type: "object" + labels: + additionalProperties: + type: "string" + description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" + type: "object" + type: "object" + spec: + description: "Spec is the specification of the desired behavior of the machine." + properties: + cloneMode: + description: "CloneMode specifies the type of clone operation. 
The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + failureDomain: + description: "FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone." + type: "string" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." + type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." 
+ type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + providerID: + description: "ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc" + type: "string" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." 
+ minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + required: + - "spec" + type: "object" + required: + - "template" + type: "object" + type: "object" + served: false + storage: false diff --git a/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherevms.yaml b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherevms.yaml new file mode 100644 index 000000000..03c34cada --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherevms.yaml @@ -0,0 +1,307 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.13.0" + name: "vspherevms.infrastructure.cluster.x-k8s.io" +spec: + group: "infrastructure.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "VSphereVM" + listKind: "VSphereVMList" + plural: "vspherevms" + singular: "vspherevm" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha4" + schema: + openAPIV3Schema: + description: "VSphereVM is the Schema for the vspherevms API \n Deprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "VSphereVMSpec defines the desired state of VSphereVM." + properties: + biosUUID: + description: "BiosUUID is the VM's BIOS UUID that is assigned at runtime after the VM has been created. This field is required at runtime for other controllers that read this CRD as unstructured data." + type: "string" + bootstrapRef: + description: "BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). 
This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + cloneMode: + description: "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots." + type: "string" + customVMXKeys: + additionalProperties: + type: "string" + description: "CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map" + type: "object" + datacenter: + description: "Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located." + type: "string" + datastore: + description: "Datastore is the name or inventory path of the datastore in which the virtual machine is created/located." + type: "string" + diskGiB: + description: "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + folder: + description: "Folder is the name or inventory path of the folder in which the virtual machine is created/located." + type: "string" + memoryMiB: + description: "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int64" + type: "integer" + network: + description: "Network is the network configuration for this machine's VM." + properties: + devices: + description: "Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name" + items: + description: "NetworkDeviceSpec defines the network configuration for a virtual machine's network device." + properties: + deviceName: + description: "DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system." + type: "string" + dhcp4: + description: "DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses." 
+ type: "boolean" + dhcp6: + description: "DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses." + type: "boolean" + gateway4: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false." + type: "string" + gateway6: + description: "Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false." + type: "string" + ipAddrs: + description: "IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider." + type: "string" + mtu: + description: "MTU is the device’s Maximum Transmission Unit size in bytes." + format: "int64" + type: "integer" + nameservers: + description: "Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf)." + items: + type: "string" + type: "array" + networkName: + description: "NetworkName is the name of the vSphere network to which the device will be connected." + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the device." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + searchDomains: + description: "SearchDomains is a list of search domains used when resolving IP addresses with DNS." + items: + type: "string" + type: "array" + required: + - "networkName" + type: "object" + type: "array" + preferredAPIServerCidr: + description: "PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine" + type: "string" + routes: + description: "Routes is a list of optional, static routes applied to the virtual machine." + items: + description: "NetworkRouteSpec defines a static network route." + properties: + metric: + description: "Metric is the weight/priority of the route." + format: "int32" + type: "integer" + to: + description: "To is an IPv4 or IPv6 address." + type: "string" + via: + description: "Via is an IPv4 or IPv6 address." + type: "string" + required: + - "metric" + - "to" + - "via" + type: "object" + type: "array" + required: + - "devices" + type: "object" + numCPUs: + description: "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." + format: "int32" + type: "integer" + numCoresPerSocket: + description: "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned." 
+ format: "int32" + type: "integer" + resourcePool: + description: "ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located." + type: "string" + server: + description: "Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located." + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot." + type: "string" + storagePolicyName: + description: "StoragePolicyName of the storage policy to use with this Virtual Machine" + type: "string" + template: + description: "Template is the name or inventory path of the template used to clone the virtual machine." + minLength: 1 + type: "string" + thumbprint: + description: "Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server." + type: "string" + required: + - "network" + - "template" + type: "object" + status: + description: "VSphereVMStatus defines the observed state of VSphereVM" + properties: + addresses: + description: "Addresses is a list of the VM's IP addresses. This field is required at runtime for other controllers that read this CRD as unstructured data." + items: + type: "string" + type: "array" + cloneMode: + description: "CloneMode is the type of clone operation used to clone this VM. Since LinkedMode is the default but fails gracefully if the source of the clone has no snapshots, this field may be used to determine the actual type of clone operation used to create this VM." + type: "string" + conditions: + description: "Conditions defines current service state of the VSphereVM." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition. This field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important." 
+ type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. \n Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output." + type: "string" + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a succinct value suitable for vm interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. \n Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output." + type: "string" + network: + description: "Network returns the network status for each of the machine's configured network interfaces." + items: + description: "NetworkStatus provides information about one of a VM's networks." + properties: + connected: + description: "Connected is a flag that indicates whether this network is currently connected to the VM." + type: "boolean" + ipAddrs: + description: "IPAddrs is one or more IP addresses reported by vm-tools." + items: + type: "string" + type: "array" + macAddr: + description: "MACAddr is the MAC address of the network device." + type: "string" + networkName: + description: "NetworkName is the name of the network." + type: "string" + required: + - "macAddr" + type: "object" + type: "array" + ready: + description: "Ready is true when the provider resource is ready. This field is required at runtime for other controllers that read this CRD as unstructured data." + type: "boolean" + retryAfter: + description: "RetryAfter tracks the time we can retry queueing a task" + format: "date-time" + type: "string" + snapshot: + description: "Snapshot is the name of the snapshot from which the VM was cloned if LinkedMode is enabled." + type: "string" + taskRef: + description: "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users." 
+ type: "string" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesetbindings.yaml b/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesetbindings.yaml new file mode 100644 index 000000000..8b271bc34 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesetbindings.yaml @@ -0,0 +1,83 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "clusterresourcesetbindings.addons.cluster.x-k8s.io" +spec: + group: "addons.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "ClusterResourceSetBinding" + listKind: "ClusterResourceSetBindingList" + plural: "clusterresourcesetbindings" + singular: "clusterresourcesetbinding" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "ClusterResourceSetBinding lists all matching ClusterResourceSets with the cluster it belongs to.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "ClusterResourceSetBindingSpec defines the desired state of ClusterResourceSetBinding." + properties: + bindings: + description: "Bindings is a list of ClusterResourceSets and their resources." + items: + description: "ResourceSetBinding keeps info on all of the resources in a ClusterResourceSet." + properties: + clusterResourceSetName: + description: "ClusterResourceSetName is the name of the ClusterResourceSet that is applied to the owner cluster of the binding." + type: "string" + resources: + description: "Resources is a list of resources that the ClusterResourceSet has." + items: + description: "ResourceBinding shows the status of a resource that belongs to a ClusterResourceSet matched by the owner cluster of the ClusterResourceSetBinding object." + properties: + applied: + description: "Applied is to track if a resource is applied to the cluster or not." + type: "boolean" + hash: + description: "Hash is the hash of a resource's data. This can be used to decide if a resource is changed.\nFor \"ApplyOnce\" ClusterResourceSet.spec.strategy, this is no-op as that strategy does not act on change." + type: "string" + kind: + description: "Kind of the resource. Supported kinds are: Secrets and ConfigMaps." + enum: + - "Secret" + - "ConfigMap" + type: "string" + lastAppliedTime: + description: "LastAppliedTime identifies when this resource was last applied to the cluster." + format: "date-time" + type: "string" + name: + description: "Name of the resource that is in the same namespace with ClusterResourceSet object." 
+ minLength: 1 + type: "string" + required: + - "applied" + - "kind" + - "name" + type: "object" + type: "array" + required: + - "clusterResourceSetName" + type: "object" + type: "array" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesets.yaml b/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesets.yaml new file mode 100644 index 000000000..6867b3ee7 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesets.yaml @@ -0,0 +1,135 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "clusterresourcesets.addons.cluster.x-k8s.io" +spec: + group: "addons.cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "ClusterResourceSet" + listKind: "ClusterResourceSetList" + plural: "clusterresourcesets" + singular: "clusterresourceset" + scope: "Namespaced" + versions: + - deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "ClusterResourceSet is the Schema for the clusterresourcesets API.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "ClusterResourceSetSpec defines the desired state of ClusterResourceSet." + properties: + clusterSelector: + description: "Label selector for Clusters. The Clusters that are\nselected by this will be the ones affected by this ClusterResourceSet.\nIt must match the Cluster labels. This field is immutable." + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + resources: + description: "Resources is a list of Secrets/ConfigMaps where each contains 1 or more resources to be applied to remote clusters." + items: + description: "ResourceRef specifies a resource." + properties: + kind: + description: "Kind of the resource. Supported kinds are: Secrets and ConfigMaps." + enum: + - "Secret" + - "ConfigMap" + type: "string" + name: + description: "Name of the resource that is in the same namespace with ClusterResourceSet object." + minLength: 1 + type: "string" + required: + - "kind" + - "name" + type: "object" + type: "array" + strategy: + description: "Strategy is the strategy to be used during applying resources. Defaults to ApplyOnce. This field is immutable." + enum: + - "ApplyOnce" + type: "string" + required: + - "clusterSelector" + type: "object" + status: + description: "ClusterResourceSetStatus defines the observed state of ClusterResourceSet." + properties: + conditions: + description: "Conditions defines current state of the ClusterResourceSet." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when\nthe API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition.\nThis field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase.\nThe specific API may choose whether or not this field is considered a guaranteed API.\nThis field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately\nunderstand the current situation and act accordingly.\nThe Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase.\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions\ncan be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + observedGeneration: + description: "ObservedGeneration reflects the generation of the most recently observed ClusterResourceSet." 
+ format: "int64" + type: "integer" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/clusters.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/clusters.yaml new file mode 100644 index 000000000..c25cd57c5 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/clusters.yaml @@ -0,0 +1,217 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "clusters.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "Cluster" + listKind: "ClusterList" + plural: "clusters" + shortNames: + - "cl" + singular: "cluster" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Cluster status such as Pending/Provisioning/Provisioned/Deleting/Failed" + jsonPath: ".status.phase" + name: "Phase" + type: "string" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "Cluster is the Schema for the clusters API." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "ClusterSpec defines the desired state of Cluster." + properties: + clusterNetwork: + description: "Cluster network configuration." + properties: + apiServerPort: + description: "APIServerPort specifies the port the API Server should bind to.\nDefaults to 6443." + format: "int32" + type: "integer" + pods: + description: "The network ranges from which Pod networks are allocated." + properties: + cidrBlocks: + items: + type: "string" + type: "array" + required: + - "cidrBlocks" + type: "object" + serviceDomain: + description: "Domain name for services." + type: "string" + services: + description: "The network ranges from which service VIPs are allocated." + properties: + cidrBlocks: + items: + type: "string" + type: "array" + required: + - "cidrBlocks" + type: "object" + type: "object" + controlPlaneEndpoint: + description: "ControlPlaneEndpoint represents the endpoint used to communicate with the control plane." + properties: + host: + description: "The hostname on which the API server is serving." + type: "string" + port: + description: "The port on which the API server is serving." + format: "int32" + type: "integer" + required: + - "host" + - "port" + type: "object" + controlPlaneRef: + description: "ControlPlaneRef is an optional reference to a provider-specific resource that holds\nthe details for provisioning the Control Plane for a Cluster." + properties: + apiVersion: + description: "API version of the referent." 
+ type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + infrastructureRef: + description: "InfrastructureRef is a reference to a provider-specific resource that holds the details\nfor provisioning infrastructure for a cluster in said provider." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." 
+ type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + paused: + description: "Paused can be used to prevent controllers from processing the Cluster and all its associated objects." + type: "boolean" + type: "object" + status: + description: "ClusterStatus defines the observed state of Cluster." + properties: + conditions: + description: "Conditions defines current service state of the cluster." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when\nthe API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition.\nThis field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase.\nThe specific API may choose whether or not this field is considered a guaranteed API.\nThis field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately\nunderstand the current situation and act accordingly.\nThe Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase.\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions\ncan be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + controlPlaneInitialized: + description: "ControlPlaneInitialized defines if the control plane has been initialized." + type: "boolean" + controlPlaneReady: + description: "ControlPlaneReady defines if the control plane is ready." + type: "boolean" + failureDomains: + additionalProperties: + description: "FailureDomainSpec is the Schema for Cluster API failure domains.\nIt allows controllers to understand how many failure domains a cluster can optionally span across." + properties: + attributes: + additionalProperties: + type: "string" + description: "Attributes is a free form map of attributes an infrastructure provider might use or require." 
+ type: "object" + controlPlane: + description: "ControlPlane determines if this failure domain is suitable for use by control plane machines." + type: "boolean" + type: "object" + description: "FailureDomains is a slice of failure domain objects synced from the infrastructure provider." + type: "object" + failureMessage: + description: "FailureMessage indicates that there is a fatal problem reconciling the\nstate, and will be set to a descriptive error message." + type: "string" + failureReason: + description: "FailureReason indicates that there is a fatal problem reconciling the\nstate, and will be set to a token value suitable for\nprogrammatic interpretation." + type: "string" + infrastructureReady: + description: "InfrastructureReady is the state of the infrastructure provider." + type: "boolean" + observedGeneration: + description: "ObservedGeneration is the latest generation observed by the controller." + format: "int64" + type: "integer" + phase: + description: "Phase represents the current phase of cluster actuation.\nE.g. Pending, Running, Terminating, Failed etc." + type: "string" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinedeployments.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinedeployments.yaml new file mode 100644 index 000000000..526ddc26c --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinedeployments.yaml @@ -0,0 +1,326 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "machinedeployments.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "MachineDeployment" + listKind: "MachineDeploymentList" + plural: "machinedeployments" + shortNames: + - "md" + singular: "machinedeployment" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "MachineDeployment status such as ScalingUp/ScalingDown/Running/Failed/Unknown" + jsonPath: ".status.phase" + name: "Phase" + type: "string" + - description: "Total number of non-terminated machines targeted by this MachineDeployment" + jsonPath: ".status.replicas" + name: "Replicas" + type: "integer" + - description: "Total number of ready machines targeted by this MachineDeployment" + jsonPath: ".status.readyReplicas" + name: "Ready" + type: "integer" + - description: "Total number of non-terminated machines targeted by this deployment that have the desired template spec" + jsonPath: ".status.updatedReplicas" + name: "Updated" + type: "integer" + - description: "Total number of unavailable machines targeted by this MachineDeployment" + jsonPath: ".status.unavailableReplicas" + name: "Unavailable" + type: "integer" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "MachineDeployment is the Schema for the machinedeployments API.\n\n\nDeprecated: This type will be removed in one of the next releases." 
+ properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "MachineDeploymentSpec defines the desired state of MachineDeployment." + properties: + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + minReadySeconds: + description: "Minimum number of seconds for which a newly created machine should\nbe ready.\nDefaults to 0 (machine will be considered available as soon as it\nis ready)" + format: "int32" + type: "integer" + paused: + description: "Indicates that the deployment is paused." + type: "boolean" + progressDeadlineSeconds: + description: "The maximum time in seconds for a deployment to make progress before it\nis considered to be failed. The deployment controller will continue to\nprocess failed deployments and a condition with a ProgressDeadlineExceeded\nreason will be surfaced in the deployment status. Note that progress will\nnot be estimated during the time a deployment is paused. Defaults to 600s." + format: "int32" + type: "integer" + replicas: + description: "Number of desired machines. Defaults to 1.\nThis is a pointer to distinguish between explicit zero and not specified." + format: "int32" + type: "integer" + revisionHistoryLimit: + description: "The number of old MachineSets to retain to allow rollback.\nThis is a pointer to distinguish between explicit zero and not specified.\nDefaults to 1." + format: "int32" + type: "integer" + selector: + description: "Label selector for machines. Existing MachineSets whose machines are\nselected by this will be the ones affected by this deployment.\nIt must match the machine template's labels." + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + strategy: + description: "The deployment strategy to use to replace existing machines with\nnew ones." + properties: + rollingUpdate: + description: "Rolling update config params. Present only if\nMachineDeploymentStrategyType = RollingUpdate." + properties: + maxSurge: + anyOf: + - type: "integer" + - type: "string" + description: "The maximum number of machines that can be scheduled above the\ndesired number of machines.\nValue can be an absolute number (ex: 5) or a percentage of\ndesired machines (ex: 10%).\nThis can not be 0 if MaxUnavailable is 0.\nAbsolute number is calculated from percentage by rounding up.\nDefaults to 1.\nExample: when this is set to 30%, the new MachineSet can be scaled\nup immediately when the rolling update starts, such that the total\nnumber of old and new machines do not exceed 130% of desired\nmachines. Once old machines have been killed, new MachineSet can\nbe scaled up further, ensuring that total number of machines running\nat any time during the update is at most 130% of desired machines." + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: "integer" + - type: "string" + description: "The maximum number of machines that can be unavailable during the update.\nValue can be an absolute number (ex: 5) or a percentage of desired\nmachines (ex: 10%).\nAbsolute number is calculated from percentage by rounding down.\nThis can not be 0 if MaxSurge is 0.\nDefaults to 0.\nExample: when this is set to 30%, the old MachineSet can be scaled\ndown to 70% of desired machines immediately when the rolling update\nstarts. Once new machines are ready, old MachineSet can be scaled\ndown further, followed by scaling up the new MachineSet, ensuring\nthat the total number of machines available at all times\nduring the update is at least 70% of desired machines." + x-kubernetes-int-or-string: true + type: "object" + type: + description: "Type of deployment. Currently the only supported strategy is\n\"RollingUpdate\".\nDefault is RollingUpdate." + type: "string" + type: "object" + template: + description: "Template describes the machines that will be created." + properties: + metadata: + description: "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations" + type: "object" + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. 
This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + labels: + additionalProperties: + type: "string" + description: "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels" + type: "object" + name: + description: "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + ownerReferences: + description: "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + items: + description: "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + blockOwnerDeletion: + description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned." 
+ type: "boolean" + controller: + description: "If true, this reference points to the managing controller." + type: "boolean" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" + type: "string" + required: + - "apiVersion" + - "kind" + - "name" + - "uid" + type: "object" + x-kubernetes-map-type: "atomic" + type: "array" + type: "object" + spec: + description: "Specification of the desired behavior of the machine.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + properties: + bootstrap: + description: "Bootstrap is a reference to a local struct which encapsulates\nfields to configure the Machine’s bootstrapping mechanism." + properties: + configRef: + description: "ConfigRef is a reference to a bootstrap provider-specific resource\nthat holds configuration details. The reference is optional to\nallow users/operators to specify Bootstrap.Data without\nthe need of a controller." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + data: + description: "Data contains the bootstrap data, such as cloud-init details scripts.\nIf nil, the Machine should remain in the Pending state.\n\n\nDeprecated: Switch to DataSecretName." + type: "string" + dataSecretName: + description: "DataSecretName is the name of the secret that stores the bootstrap data script.\nIf nil, the Machine should remain in the Pending state." 
+ type: "string" + type: "object" + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + failureDomain: + description: "FailureDomain is the failure domain the machine will be created in.\nMust match a key in the FailureDomains map stored on the cluster object." + type: "string" + infrastructureRef: + description: "InfrastructureRef is a required reference to a custom resource\noffered by an infrastructure provider." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + nodeDrainTimeout: + description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.\nThe default value is 0, meaning that the node can be drained without any time limitations.\nNOTE: NodeDrainTimeout is different from `kubectl drain --timeout`" + type: "string" + providerID: + description: "ProviderID is the identification ID of the machine provided by the provider.\nThis field must match the provider ID as seen on the node object corresponding to this machine.\nThis field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler\nwith cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out\nmachines at provider which could not get registered as Kubernetes nodes. With cluster-api as a\ngeneric out-of-tree provider for autoscaler, this field is required by autoscaler to be\nable to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver\nand then a comparison is done to find out unregistered machines and are marked for delete.\nThis field will be set by the actuators and consumed by higher level entities like autoscaler that will\nbe interfacing with cluster-api as generic provider." 
+ type: "string" + version: + description: "Version defines the desired Kubernetes version.\nThis field is meant to be optionally used by bootstrap providers." + type: "string" + required: + - "bootstrap" + - "clusterName" + - "infrastructureRef" + type: "object" + type: "object" + required: + - "clusterName" + - "selector" + - "template" + type: "object" + status: + description: "MachineDeploymentStatus defines the observed state of MachineDeployment." + properties: + availableReplicas: + description: "Total number of available machines (ready for at least minReadySeconds)\ntargeted by this deployment." + format: "int32" + type: "integer" + observedGeneration: + description: "The generation observed by the deployment controller." + format: "int64" + type: "integer" + phase: + description: "Phase represents the current phase of a MachineDeployment (ScalingUp, ScalingDown, Running, Failed, or Unknown)." + type: "string" + readyReplicas: + description: "Total number of ready machines targeted by this deployment." + format: "int32" + type: "integer" + replicas: + description: "Total number of non-terminated machines targeted by this deployment\n(their labels match the selector)." + format: "int32" + type: "integer" + selector: + description: "Selector is the same as the label selector but in the string format to avoid introspection\nby clients. The string will be in the same format as the query-param syntax.\nMore info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors" + type: "string" + unavailableReplicas: + description: "Total number of unavailable machines targeted by this deployment.\nThis is the total number of machines that are still required for\nthe deployment to have 100% available capacity. They may either\nbe machines that are running but not yet available or machines\nthat still have not been created." + format: "int32" + type: "integer" + updatedReplicas: + description: "Total number of non-terminated machines targeted by this deployment\nthat have the desired template spec." 
+ format: "int32" + type: "integer" + type: "object" + type: "object" + served: false + storage: false + subresources: + scale: + labelSelectorPath: ".status.selector" + specReplicasPath: ".spec.replicas" + statusReplicasPath: ".status.replicas" + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinehealthchecks.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinehealthchecks.yaml new file mode 100644 index 000000000..252af4fd1 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinehealthchecks.yaml @@ -0,0 +1,207 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "machinehealthchecks.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "MachineHealthCheck" + listKind: "MachineHealthCheckList" + plural: "machinehealthchecks" + shortNames: + - "mhc" + - "mhcs" + singular: "machinehealthcheck" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Maximum number of unhealthy machines allowed" + jsonPath: ".spec.maxUnhealthy" + name: "MaxUnhealthy" + type: "string" + - description: "Number of machines currently monitored" + jsonPath: ".status.expectedMachines" + name: "ExpectedMachines" + type: "integer" + - description: "Current observed healthy machines" + jsonPath: ".status.currentHealthy" + name: "CurrentHealthy" + type: "integer" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "MachineHealthCheck is the Schema for the machinehealthchecks API.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "Specification of machine health check policy" + properties: + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + maxUnhealthy: + anyOf: + - type: "integer" + - type: "string" + description: "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by\n\"selector\" are not healthy." + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: "Machines older than this duration without a node will be considered to have\nfailed and will be remediated." + type: "string" + remediationTemplate: + description: "RemediationTemplate is a reference to a remediation template\nprovided by an infrastructure provider.\n\n\nThis field is completely optional, when filled, the MachineHealthCheck controller\ncreates a new object from the template referenced and hands off remediation of the machine to\na controller that lives outside of Cluster API." + properties: + apiVersion: + description: "API version of the referent." 
+ type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + selector: + description: "Label selector to match machines whose health will be exercised" + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + unhealthyConditions: + description: "UnhealthyConditions contains a list of the conditions that determine\nwhether a node is considered unhealthy. The conditions are combined in a\nlogical OR, i.e. if any of the conditions is met, the node is unhealthy." + items: + description: "UnhealthyCondition represents a Node condition type and value with a timeout\nspecified as a duration. 
When the named condition has been in the given\nstatus for at least the timeout value, a node is considered unhealthy." + properties: + status: + minLength: 1 + type: "string" + timeout: + type: "string" + type: + minLength: 1 + type: "string" + required: + - "status" + - "timeout" + - "type" + type: "object" + minItems: 1 + type: "array" + required: + - "clusterName" + - "selector" + - "unhealthyConditions" + type: "object" + status: + description: "Most recently observed status of MachineHealthCheck resource" + properties: + conditions: + description: "Conditions defines current service state of the MachineHealthCheck." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when\nthe API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition.\nThis field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase.\nThe specific API may choose whether or not this field is considered a guaranteed API.\nThis field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately\nunderstand the current situation and act accordingly.\nThe Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase.\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions\ncan be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + currentHealthy: + description: "total number of healthy machines counted by this machine health check" + format: "int32" + minimum: 0.0 + type: "integer" + expectedMachines: + description: "total number of machines counted by this machine health check" + format: "int32" + minimum: 0.0 + type: "integer" + observedGeneration: + description: "ObservedGeneration is the latest generation observed by the controller." 
+ format: "int64" + type: "integer" + remediationsAllowed: + description: "RemediationsAllowed is the number of further remediations allowed by this machine health check before\nmaxUnhealthy short circuiting will be applied" + format: "int32" + minimum: 0.0 + type: "integer" + targets: + description: "Targets shows the current list of machines the machine health check is watching" + items: + type: "string" + type: "array" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinepools.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinepools.yaml new file mode 100644 index 000000000..b598cdff4 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinepools.yaml @@ -0,0 +1,347 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "machinepools.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "MachinePool" + listKind: "MachinePoolList" + plural: "machinepools" + shortNames: + - "mp" + singular: "machinepool" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "MachinePool replicas count" + jsonPath: ".status.replicas" + name: "Replicas" + type: "string" + - description: "MachinePool status such as Terminating/Pending/Provisioning/Running/Failed etc" + jsonPath: ".status.phase" + name: "Phase" + type: "string" + - description: "Kubernetes version associated with this MachinePool" + jsonPath: ".spec.template.spec.version" + name: "Version" + type: "string" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "MachinePool is the Schema for the machinepools API.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "MachinePoolSpec defines the desired state of MachinePool." + properties: + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + failureDomains: + description: "FailureDomains is the list of failure domains this MachinePool should be attached to." 
+ items: + type: "string" + type: "array" + minReadySeconds: + description: "Minimum number of seconds for which a newly created machine instances should\nbe ready.\nDefaults to 0 (machine instance will be considered available as soon as it\nis ready)" + format: "int32" + type: "integer" + providerIDList: + description: "ProviderIDList are the identification IDs of machine instances provided by the provider.\nThis field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances." + items: + type: "string" + type: "array" + replicas: + description: "Number of desired machines. Defaults to 1.\nThis is a pointer to distinguish between explicit zero and not specified." + format: "int32" + type: "integer" + strategy: + description: "The deployment strategy to use to replace existing machine instances with\nnew ones." + properties: + rollingUpdate: + description: "Rolling update config params. Present only if\nMachineDeploymentStrategyType = RollingUpdate." + properties: + maxSurge: + anyOf: + - type: "integer" + - type: "string" + description: "The maximum number of machines that can be scheduled above the\ndesired number of machines.\nValue can be an absolute number (ex: 5) or a percentage of\ndesired machines (ex: 10%).\nThis can not be 0 if MaxUnavailable is 0.\nAbsolute number is calculated from percentage by rounding up.\nDefaults to 1.\nExample: when this is set to 30%, the new MachineSet can be scaled\nup immediately when the rolling update starts, such that the total\nnumber of old and new machines do not exceed 130% of desired\nmachines. Once old machines have been killed, new MachineSet can\nbe scaled up further, ensuring that total number of machines running\nat any time during the update is at most 130% of desired machines." + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: "integer" + - type: "string" + description: "The maximum number of machines that can be unavailable during the update.\nValue can be an absolute number (ex: 5) or a percentage of desired\nmachines (ex: 10%).\nAbsolute number is calculated from percentage by rounding down.\nThis can not be 0 if MaxSurge is 0.\nDefaults to 0.\nExample: when this is set to 30%, the old MachineSet can be scaled\ndown to 70% of desired machines immediately when the rolling update\nstarts. Once new machines are ready, old MachineSet can be scaled\ndown further, followed by scaling up the new MachineSet, ensuring\nthat the total number of machines available at all times\nduring the update is at least 70% of desired machines." + x-kubernetes-int-or-string: true + type: "object" + type: + description: "Type of deployment. Currently the only supported strategy is\n\"RollingUpdate\".\nDefault is RollingUpdate." + type: "string" + type: "object" + template: + description: "Template describes the machines that will be created." + properties: + metadata: + description: "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. 
They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations" + type: "object" + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + labels: + additionalProperties: + type: "string" + description: "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels" + type: "object" + name: + description: "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + ownerReferences: + description: "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + items: + description: "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + properties: + apiVersion: + description: "API version of the referent." 
+ type: "string" + blockOwnerDeletion: + description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned." + type: "boolean" + controller: + description: "If true, this reference points to the managing controller." + type: "boolean" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" + type: "string" + required: + - "apiVersion" + - "kind" + - "name" + - "uid" + type: "object" + x-kubernetes-map-type: "atomic" + type: "array" + type: "object" + spec: + description: "Specification of the desired behavior of the machine.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + properties: + bootstrap: + description: "Bootstrap is a reference to a local struct which encapsulates\nfields to configure the Machine’s bootstrapping mechanism." + properties: + configRef: + description: "ConfigRef is a reference to a bootstrap provider-specific resource\nthat holds configuration details. The reference is optional to\nallow users/operators to specify Bootstrap.Data without\nthe need of a controller." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." 
+ type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + data: + description: "Data contains the bootstrap data, such as cloud-init details scripts.\nIf nil, the Machine should remain in the Pending state.\n\n\nDeprecated: Switch to DataSecretName." + type: "string" + dataSecretName: + description: "DataSecretName is the name of the secret that stores the bootstrap data script.\nIf nil, the Machine should remain in the Pending state." + type: "string" + type: "object" + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + failureDomain: + description: "FailureDomain is the failure domain the machine will be created in.\nMust match a key in the FailureDomains map stored on the cluster object." + type: "string" + infrastructureRef: + description: "InfrastructureRef is a required reference to a custom resource\noffered by an infrastructure provider." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." 
+ type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + nodeDrainTimeout: + description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.\nThe default value is 0, meaning that the node can be drained without any time limitations.\nNOTE: NodeDrainTimeout is different from `kubectl drain --timeout`" + type: "string" + providerID: + description: "ProviderID is the identification ID of the machine provided by the provider.\nThis field must match the provider ID as seen on the node object corresponding to this machine.\nThis field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler\nwith cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out\nmachines at provider which could not get registered as Kubernetes nodes. With cluster-api as a\ngeneric out-of-tree provider for autoscaler, this field is required by autoscaler to be\nable to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver\nand then a comparison is done to find out unregistered machines and are marked for delete.\nThis field will be set by the actuators and consumed by higher level entities like autoscaler that will\nbe interfacing with cluster-api as generic provider." + type: "string" + version: + description: "Version defines the desired Kubernetes version.\nThis field is meant to be optionally used by bootstrap providers." + type: "string" + required: + - "bootstrap" + - "clusterName" + - "infrastructureRef" + type: "object" + type: "object" + required: + - "clusterName" + - "template" + type: "object" + status: + description: "MachinePoolStatus defines the observed state of MachinePool." + properties: + availableReplicas: + description: "The number of available replicas (ready for at least minReadySeconds) for this MachinePool." + format: "int32" + type: "integer" + bootstrapReady: + description: "BootstrapReady is the state of the bootstrap provider." + type: "boolean" + conditions: + description: "Conditions define the current service state of the MachinePool." + items: + description: "Condition defines an observation of a Cluster API resource operational state." + properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when\nthe API field changed is acceptable." 
+ format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition.\nThis field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase.\nThe specific API may choose whether or not this field is considered a guaranteed API.\nThis field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately\nunderstand the current situation and act accordingly.\nThe Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase.\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions\ncan be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage indicates that there is a problem reconciling the state,\nand will be set to a descriptive error message." + type: "string" + failureReason: + description: "FailureReason indicates that there is a problem reconciling the state, and\nwill be set to a token value suitable for programmatic interpretation." + type: "string" + infrastructureReady: + description: "InfrastructureReady is the state of the infrastructure provider." + type: "boolean" + nodeRefs: + description: "NodeRefs will point to the corresponding Nodes if it they exist." + items: + description: "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 ." + properties: + apiVersion: + description: "API version of the referent." 
+ type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + type: "array" + observedGeneration: + description: "ObservedGeneration is the latest generation observed by the controller." + format: "int64" + type: "integer" + phase: + description: "Phase represents the current phase of cluster actuation.\nE.g. Pending, Running, Terminating, Failed etc." + type: "string" + readyReplicas: + description: "The number of ready replicas for this MachinePool. A machine is considered ready when the node has been created and is \"Ready\"." + format: "int32" + type: "integer" + replicas: + description: "Replicas is the most recently observed number of replicas." + format: "int32" + type: "integer" + unavailableReplicas: + description: "Total number of unavailable machine instances targeted by this machine pool.\nThis is the total number of machine instances that are still required for\nthe machine pool to have 100% available capacity. They may either\nbe machine instances that are running but not yet available or machine instances\nthat still have not been created." 
+ format: "int32" + type: "integer" + type: "object" + type: "object" + served: false + storage: false + subresources: + scale: + specReplicasPath: ".spec.replicas" + statusReplicasPath: ".status.replicas" + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machines.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machines.yaml new file mode 100644 index 000000000..dd4d62273 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machines.yaml @@ -0,0 +1,243 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "machines.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "Machine" + listKind: "MachineList" + plural: "machines" + shortNames: + - "ma" + singular: "machine" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Provider ID" + jsonPath: ".spec.providerID" + name: "ProviderID" + type: "string" + - description: "Machine status such as Terminating/Pending/Running/Failed etc" + jsonPath: ".status.phase" + name: "Phase" + type: "string" + - description: "Kubernetes version associated with this Machine" + jsonPath: ".spec.version" + name: "Version" + type: "string" + - description: "Node name associated with this machine" + jsonPath: ".status.nodeRef.name" + name: "NodeName" + priority: 1 + type: "string" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "Machine is the Schema for the machines API.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "MachineSpec defines the desired state of Machine." + properties: + bootstrap: + description: "Bootstrap is a reference to a local struct which encapsulates\nfields to configure the Machine’s bootstrapping mechanism." + properties: + configRef: + description: "ConfigRef is a reference to a bootstrap provider-specific resource\nthat holds configuration details. The reference is optional to\nallow users/operators to specify Bootstrap.Data without\nthe need of a controller." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). 
This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + data: + description: "Data contains the bootstrap data, such as cloud-init details scripts.\nIf nil, the Machine should remain in the Pending state.\n\n\nDeprecated: Switch to DataSecretName." + type: "string" + dataSecretName: + description: "DataSecretName is the name of the secret that stores the bootstrap data script.\nIf nil, the Machine should remain in the Pending state." + type: "string" + type: "object" + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + failureDomain: + description: "FailureDomain is the failure domain the machine will be created in.\nMust match a key in the FailureDomains map stored on the cluster object." + type: "string" + infrastructureRef: + description: "InfrastructureRef is a required reference to a custom resource\noffered by an infrastructure provider." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." 
+ type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + nodeDrainTimeout: + description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.\nThe default value is 0, meaning that the node can be drained without any time limitations.\nNOTE: NodeDrainTimeout is different from `kubectl drain --timeout`" + type: "string" + providerID: + description: "ProviderID is the identification ID of the machine provided by the provider.\nThis field must match the provider ID as seen on the node object corresponding to this machine.\nThis field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler\nwith cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out\nmachines at provider which could not get registered as Kubernetes nodes. With cluster-api as a\ngeneric out-of-tree provider for autoscaler, this field is required by autoscaler to be\nable to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver\nand then a comparison is done to find out unregistered machines and are marked for delete.\nThis field will be set by the actuators and consumed by higher level entities like autoscaler that will\nbe interfacing with cluster-api as generic provider." + type: "string" + version: + description: "Version defines the desired Kubernetes version.\nThis field is meant to be optionally used by bootstrap providers." + type: "string" + required: + - "bootstrap" + - "clusterName" + - "infrastructureRef" + type: "object" + status: + description: "MachineStatus defines the observed state of Machine." + properties: + addresses: + description: "Addresses is a list of addresses assigned to the machine.\nThis field is copied from the infrastructure provider reference." + items: + description: "MachineAddress contains information for the node's address." + properties: + address: + description: "The machine address." + type: "string" + type: + description: "Machine address type, one of Hostname, ExternalIP or InternalIP." + type: "string" + required: + - "address" + - "type" + type: "object" + type: "array" + bootstrapReady: + description: "BootstrapReady is the state of the bootstrap provider." + type: "boolean" + conditions: + description: "Conditions defines current service state of the Machine." + items: + description: "Condition defines an observation of a Cluster API resource operational state." 
+ properties: + lastTransitionTime: + description: "Last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when\nthe API field changed is acceptable." + format: "date-time" + type: "string" + message: + description: "A human readable message indicating details about the transition.\nThis field may be empty." + type: "string" + reason: + description: "The reason for the condition's last transition in CamelCase.\nThe specific API may choose whether or not this field is considered a guaranteed API.\nThis field may not be empty." + type: "string" + severity: + description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately\nunderstand the current situation and act accordingly.\nThe Severity field MUST be set only when Status=False." + type: "string" + status: + description: "Status of the condition, one of True, False, Unknown." + type: "string" + type: + description: "Type of condition in CamelCase or in foo.example.com/CamelCase.\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions\ncan be useful (see .node.status.conditions), the ability to deconflict is important." + type: "string" + required: + - "status" + - "type" + type: "object" + type: "array" + failureMessage: + description: "FailureMessage will be set in the event that there is a terminal problem\nreconciling the Machine and will contain a more verbose string suitable\nfor logging and human consumption.\n\n\nThis field should not be set for transitive errors that a controller\nfaces that are expected to be fixed automatically over\ntime (like service outages), but instead indicate that something is\nfundamentally wrong with the Machine's spec or the configuration of\nthe controller, and that manual intervention is required. Examples\nof terminal errors would be invalid combinations of settings in the\nspec, values that are unsupported by the controller, or the\nresponsible controller itself being critically misconfigured.\n\n\nAny transient errors that occur during the reconciliation of Machines\ncan be added as events to the Machine object and/or logged in the\ncontroller's output." + type: "string" + failureReason: + description: "FailureReason will be set in the event that there is a terminal problem\nreconciling the Machine and will contain a succinct value suitable\nfor machine interpretation.\n\n\nThis field should not be set for transitive errors that a controller\nfaces that are expected to be fixed automatically over\ntime (like service outages), but instead indicate that something is\nfundamentally wrong with the Machine's spec or the configuration of\nthe controller, and that manual intervention is required. Examples\nof terminal errors would be invalid combinations of settings in the\nspec, values that are unsupported by the controller, or the\nresponsible controller itself being critically misconfigured.\n\n\nAny transient errors that occur during the reconciliation of Machines\ncan be added as events to the Machine object and/or logged in the\ncontroller's output." + type: "string" + infrastructureReady: + description: "InfrastructureReady is the state of the infrastructure provider." + type: "boolean" + lastUpdated: + description: "LastUpdated identifies when the phase of the Machine last transitioned." 
+ format: "date-time" + type: "string" + nodeRef: + description: "NodeRef will point to the corresponding Node if it exists." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + observedGeneration: + description: "ObservedGeneration is the latest generation observed by the controller." + format: "int64" + type: "integer" + phase: + description: "Phase represents the current phase of machine actuation.\nE.g. Pending, Running, Terminating, Failed etc." + type: "string" + version: + description: "Version specifies the current version of Kubernetes running\non the corresponding Node. This is meant to be a means of bubbling\nup status from the Node to the Machine.\nIt is entirely optional, but useful for end-user UX if it’s present." 
+ type: "string" + type: "object" + type: "object" + served: false + storage: false + subresources: + status: {} diff --git a/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinesets.yaml b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinesets.yaml new file mode 100644 index 000000000..c82a40969 --- /dev/null +++ b/crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinesets.yaml @@ -0,0 +1,288 @@ +apiVersion: "apiextensions.k8s.io/v1" +kind: "CustomResourceDefinition" +metadata: + annotations: + controller-gen.kubebuilder.io/version: "v0.14.0" + name: "machinesets.cluster.x-k8s.io" +spec: + group: "cluster.x-k8s.io" + names: + categories: + - "cluster-api" + kind: "MachineSet" + listKind: "MachineSetList" + plural: "machinesets" + shortNames: + - "ms" + singular: "machineset" + scope: "Namespaced" + versions: + - additionalPrinterColumns: + - description: "Total number of non-terminated machines targeted by this machineset" + jsonPath: ".status.replicas" + name: "Replicas" + type: "integer" + - description: "Total number of available machines (ready for at least minReadySeconds)" + jsonPath: ".status.availableReplicas" + name: "Available" + type: "integer" + - description: "Total number of ready machines targeted by this machineset." + jsonPath: ".status.readyReplicas" + name: "Ready" + type: "integer" + deprecated: true + name: "v1alpha3" + schema: + openAPIV3Schema: + description: "MachineSet is the Schema for the machinesets API.\n\n\nDeprecated: This type will be removed in one of the next releases." + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: "string" + kind: + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + metadata: + type: "object" + spec: + description: "MachineSetSpec defines the desired state of MachineSet." + properties: + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." + minLength: 1 + type: "string" + deletePolicy: + description: "DeletePolicy defines the policy used to identify nodes to delete when downscaling.\nDefaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"" + enum: + - "Random" + - "Newest" + - "Oldest" + type: "string" + minReadySeconds: + description: "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.\nDefaults to 0 (machine will be considered available as soon as it is ready)" + format: "int32" + type: "integer" + replicas: + description: "Replicas is the number of desired replicas.\nThis is a pointer to distinguish between explicit zero and unspecified.\nDefaults to 1." 
+ format: "int32" + type: "integer" + selector: + description: "Selector is a label query over machines that should match the replica count.\nLabel keys and values that must match in order to be controlled by this MachineSet.\nIt must match the machine template's labels.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + properties: + matchExpressions: + description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." + items: + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + properties: + key: + description: "key is the label key that the selector applies to." + type: "string" + operator: + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + type: "string" + values: + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." + items: + type: "string" + type: "array" + required: + - "key" + - "operator" + type: "object" + type: "array" + matchLabels: + additionalProperties: + type: "string" + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + type: "object" + type: "object" + x-kubernetes-map-type: "atomic" + template: + description: "Template is the object that describes the machine that will be created if\ninsufficient replicas are detected.\nObject references to custom resources are treated as templates." + properties: + metadata: + description: "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + properties: + annotations: + additionalProperties: + type: "string" + description: "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations" + type: "object" + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n\nDeprecated: This field has no function and is going to be removed in a next release." 
+ type: "string" + labels: + additionalProperties: + type: "string" + description: "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels" + type: "object" + name: + description: "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + type: "string" + ownerReferences: + description: "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n\n\nDeprecated: This field has no function and is going to be removed in a next release." + items: + description: "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + blockOwnerDeletion: + description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned." + type: "boolean" + controller: + description: "If true, this reference points to the managing controller." 
+ type: "boolean" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" + type: "string" + required: + - "apiVersion" + - "kind" + - "name" + - "uid" + type: "object" + x-kubernetes-map-type: "atomic" + type: "array" + type: "object" + spec: + description: "Specification of the desired behavior of the machine.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + properties: + bootstrap: + description: "Bootstrap is a reference to a local struct which encapsulates\nfields to configure the Machine’s bootstrapping mechanism." + properties: + configRef: + description: "ConfigRef is a reference to a bootstrap provider-specific resource\nthat holds configuration details. The reference is optional to\nallow users/operators to specify Bootstrap.Data without\nthe need of a controller." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + data: + description: "Data contains the bootstrap data, such as cloud-init details scripts.\nIf nil, the Machine should remain in the Pending state.\n\n\nDeprecated: Switch to DataSecretName." + type: "string" + dataSecretName: + description: "DataSecretName is the name of the secret that stores the bootstrap data script.\nIf nil, the Machine should remain in the Pending state." + type: "string" + type: "object" + clusterName: + description: "ClusterName is the name of the Cluster this object belongs to." 
+ minLength: 1 + type: "string" + failureDomain: + description: "FailureDomain is the failure domain the machine will be created in.\nMust match a key in the FailureDomains map stored on the cluster object." + type: "string" + infrastructureRef: + description: "InfrastructureRef is a required reference to a custom resource\noffered by an infrastructure provider." + properties: + apiVersion: + description: "API version of the referent." + type: "string" + fieldPath: + description: "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future." + type: "string" + kind: + description: "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: "string" + name: + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: "string" + namespace: + description: "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/" + type: "string" + resourceVersion: + description: "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency" + type: "string" + uid: + description: "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids" + type: "string" + type: "object" + x-kubernetes-map-type: "atomic" + nodeDrainTimeout: + description: "NodeDrainTimeout is the total amount of time that the controller will spend on draining a node.\nThe default value is 0, meaning that the node can be drained without any time limitations.\nNOTE: NodeDrainTimeout is different from `kubectl drain --timeout`" + type: "string" + providerID: + description: "ProviderID is the identification ID of the machine provided by the provider.\nThis field must match the provider ID as seen on the node object corresponding to this machine.\nThis field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler\nwith cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out\nmachines at provider which could not get registered as Kubernetes nodes. With cluster-api as a\ngeneric out-of-tree provider for autoscaler, this field is required by autoscaler to be\nable to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver\nand then a comparison is done to find out unregistered machines and are marked for delete.\nThis field will be set by the actuators and consumed by higher level entities like autoscaler that will\nbe interfacing with cluster-api as generic provider." + type: "string" + version: + description: "Version defines the desired Kubernetes version.\nThis field is meant to be optionally used by bootstrap providers." 
+ type: "string" + required: + - "bootstrap" + - "clusterName" + - "infrastructureRef" + type: "object" + type: "object" + required: + - "clusterName" + - "selector" + type: "object" + status: + description: "MachineSetStatus defines the observed state of MachineSet." + properties: + availableReplicas: + description: "The number of available replicas (ready for at least minReadySeconds) for this MachineSet." + format: "int32" + type: "integer" + failureMessage: + type: "string" + failureReason: + description: "In the event that there is a terminal problem reconciling the\nreplicas, both FailureReason and FailureMessage will be set. FailureReason\nwill be populated with a succinct value suitable for machine\ninterpretation, while FailureMessage will contain a more verbose\nstring suitable for logging and human consumption.\n\n\nThese fields should not be set for transitive errors that a\ncontroller faces that are expected to be fixed automatically over\ntime (like service outages), but instead indicate that something is\nfundamentally wrong with the MachineTemplate's spec or the configuration of\nthe machine controller, and that manual intervention is required. Examples\nof terminal errors would be invalid combinations of settings in the\nspec, values that are unsupported by the machine controller, or the\nresponsible machine controller itself being critically misconfigured.\n\n\nAny transient errors that occur during the reconciliation of Machines\ncan be added as events to the MachineSet object and/or logged in the\ncontroller's output." + type: "string" + fullyLabeledReplicas: + description: "The number of replicas that have labels matching the labels of the machine template of the MachineSet." + format: "int32" + type: "integer" + observedGeneration: + description: "ObservedGeneration reflects the generation of the most recently observed MachineSet." + format: "int64" + type: "integer" + readyReplicas: + description: "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\"." + format: "int32" + type: "integer" + replicas: + description: "Replicas is the most recently observed number of replicas." + format: "int32" + type: "integer" + selector: + description: "Selector is the same as the label selector but in the string format to avoid introspection\nby clients. 
The string will be in the same format as the query-param syntax.\nMore info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors" + type: "string" + type: "object" + type: "object" + served: false + storage: false + subresources: + scale: + labelSelectorPath: ".status.selector" + specReplicasPath: ".spec.replicas" + statusReplicasPath: ".status.replicas" + status: {} diff --git a/crd-catalog/kubernetes-sigs/jobset/jobset.x-k8s.io/v1alpha2/jobsets.yaml b/crd-catalog/kubernetes-sigs/jobset/jobset.x-k8s.io/v1alpha2/jobsets.yaml index d6686cca0..879a4a0f0 100644 --- a/crd-catalog/kubernetes-sigs/jobset/jobset.x-k8s.io/v1alpha2/jobsets.yaml +++ b/crd-catalog/kubernetes-sigs/jobset/jobset.x-k8s.io/v1alpha2/jobsets.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.11.4" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "jobsets.jobset.x-k8s.io" spec: group: "jobset.x-k8s.io" @@ -21,6 +21,10 @@ spec: - jsonPath: ".status.conditions[?(@.type==\"Completed\")].status" name: "Completed" type: "string" + - description: "JobSet suspended" + jsonPath: ".spec.suspend" + name: "Suspended" + type: "string" - description: "Time this JobSet was created" jsonPath: ".metadata.creationTimestamp" name: "Age" @@ -31,10 +35,10 @@ spec: description: "JobSet is the Schema for the jobsets API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -42,10 +46,10 @@ spec: description: "JobSetSpec defines the desired state of JobSet" properties: failurePolicy: - description: "FailurePolicy, if set, configures when to declare the JobSet as failed. The JobSet is always declared failed if any job in the set finished with status failed." + description: "FailurePolicy, if set, configures when to declare the JobSet as\nfailed.\nThe JobSet is always declared failed if any job in the set\nfinished with status failed." properties: maxRestarts: - description: "MaxRestarts defines the limit on the number of JobSet restarts. A restart is achieved by recreating all active child jobs." + description: "MaxRestarts defines the limit on the number of JobSet restarts.\nA restart is achieved by recreating all active child jobs." 
format: "int32" type: "integer" type: "object" @@ -56,10 +60,10 @@ spec: description: "Network defines the networking options for the jobset." properties: enableDNSHostnames: - description: "EnableDNSHostnames allows pods to be reached via their hostnames. Pods will be reachable using the fully qualified pod hostname: ---." + description: "EnableDNSHostnames allows pods to be reached via their hostnames.\nPods will be reachable using the fully qualified pod hostname:\n---." type: "boolean" subdomain: - description: "Subdomain is an explicit choice for a network subdomain name When set, any replicated job in the set is added to this network. Defaults to if not set." + description: "Subdomain is an explicit choice for a network subdomain name\nWhen set, any replicated job in the set is added to this network.\nDefaults to if not set." type: "string" type: "object" x-kubernetes-validations: @@ -70,18 +74,18 @@ spec: items: properties: name: - description: "Name is the name of the entry and will be used as a suffix for the Job name." + description: "Name is the name of the entry and will be used as a suffix\nfor the Job name." type: "string" replicas: default: 1 - description: "Replicas is the number of jobs that will be created from this ReplicatedJob's template. Jobs names will be in the format: --" + description: "Replicas is the number of jobs that will be created from this ReplicatedJob's template.\nJobs names will be in the format: --" format: "int32" type: "integer" template: description: "Template defines the template of the Job that will be created." properties: metadata: - description: "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + description: "Standard object's metadata of the jobs created from this template.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" properties: annotations: additionalProperties: @@ -101,60 +105,60 @@ spec: type: "string" type: "object" spec: - description: "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + description: "Specification of the desired behavior of the job.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" properties: activeDeadlineSeconds: - description: "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again." + description: "Specifies the duration in seconds relative to the startTime that the job\nmay be continuously active before the system tries to terminate it; value\nmust be positive integer. If a Job is suspended (at creation or through an\nupdate), this timer will effectively be stopped and reset when the Job is\nresumed again." format: "int64" type: "integer" backoffLimit: - description: "Specifies the number of retries before marking this job failed. Defaults to 6" + description: "Specifies the number of retries before marking this job failed.\nDefaults to 6" format: "int32" type: "integer" backoffLimitPerIndex: - description: "Specifies the limit for the number of retries within an index before marking this index as failed. 
When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)." + description: "Specifies the limit for the number of retries within an\nindex before marking this index as failed. When enabled the number of\nfailures per index is kept in the pod's\nbatch.kubernetes.io/job-index-failure-count annotation. It can only\nbe set when Job's completionMode=Indexed, and the Pod's restart\npolicy is Never. The field is immutable.\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex`\nfeature gate is enabled (enabled by default)." format: "int32" type: "integer" completionMode: - description: "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. \n `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. \n `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. \n More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job." + description: "completionMode specifies how Pod completions are tracked. It can be\n`NonIndexed` (default) or `Indexed`.\n\n\n`NonIndexed` means that the Job is considered complete when there have\nbeen .spec.completions successfully completed Pods. Each Pod completion is\nhomologous to each other.\n\n\n`Indexed` means that the Pods of a\nJob get an associated completion index from 0 to (.spec.completions - 1),\navailable in the annotation batch.kubernetes.io/job-completion-index.\nThe Job is considered complete when there is one successfully completed Pod\nfor each index.\nWhen value is `Indexed`, .spec.completions must be specified and\n`.spec.parallelism` must be less than or equal to 10^5.\nIn addition, The Pod name takes the form\n`$(job-name)-$(index)-$(random-string)`,\nthe Pod hostname takes the form `$(job-name)-$(index)`.\n\n\nMore completion modes can be added in the future.\nIf the Job controller observes a mode that it doesn't recognize, which\nis possible during upgrades due to version skew, the controller\nskips updates for the Job." type: "string" completions: - description: "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + description: "Specifies the desired number of successfully finished pods the\njob should be run with. Setting to null means that the success of any\npod signals the success of all pods, and allows parallelism to have any positive\nvalue. Setting to 1 means that parallelism is limited to 1 and the success of that\npod signals the success of the job.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" format: "int32" type: "integer" manualSelector: - description: "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector" + description: "manualSelector controls generation of pod labels and pod selectors.\nLeave `manualSelector` unset unless you are certain what you are doing.\nWhen false or unset, the system pick labels unique to this job\nand appends those labels to the pod template. When true,\nthe user is responsible for picking unique labels and specifying\nthe selector. Failure to pick a unique label may cause this\nand other jobs to not function correctly. However, You may see\n`manualSelector=true` in jobs that were created with the old `extensions/v1beta1`\nAPI.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector" type: "boolean" maxFailedIndexes: - description: "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default)." + description: "Specifies the maximal number of failed indexes before marking the Job as\nfailed, when backoffLimitPerIndex is set. Once the number of failed\nindexes exceeds this number the entire Job is marked as Failed and its\nexecution is terminated. When left as null the job continues execution of\nall of its indexes and is marked with the `Complete` Job condition.\nIt can only be specified when backoffLimitPerIndex is set.\nIt can be null or up to completions. It is required and must be\nless than or equal to 10^4 when is completions greater than 10^5.\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex`\nfeature gate is enabled (enabled by default)." format: "int32" type: "integer" parallelism: - description: "Specifies the maximum desired number of pods the job should run at any given time. 
The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + description: "Specifies the maximum desired number of pods the job should\nrun at any given time. The actual number of pods running in steady state will\nbe less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),\ni.e. when the work left to do is less than max parallelism.\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" format: "int32" type: "integer" podFailurePolicy: - description: "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure. \n This field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default)." + description: "Specifies the policy of handling failed pods. In particular, it allows to\nspecify the set of actions and conditions which need to be\nsatisfied to take the associated action.\nIf empty, the default behaviour applies - the counter of failed pods,\nrepresented by the jobs's .status.failed field, is incremented and it is\nchecked against the backoffLimit. This field cannot be used in combination\nwith restartPolicy=OnFailure.\n\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy`\nfeature gate is enabled (enabled by default)." properties: rules: - description: "A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed." + description: "A list of pod failure policy rules. The rules are evaluated in order.\nOnce a rule matches a Pod failure, the remaining of the rules are ignored.\nWhen no rule matches the Pod failure, the default handling applies - the\ncounter of pod failures is incremented and it is checked against\nthe backoffLimit. At most 20 elements are allowed." items: - description: "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule." + description: "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.\nOne of onExitCodes and onPodConditions, but not both, can be used in each rule." properties: action: - description: "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: \n - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. This value is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). 
- Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule." + description: "Specifies the action taken on a pod failure when the requirements are satisfied.\nPossible values are:\n\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown action by skipping the rule." type: "string" onExitCodes: description: "Represents the requirement on the container exit codes." properties: containerName: - description: "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template." + description: "Restricts the check for exit codes to the container with the\nspecified name. When null, the rule applies to all containers.\nWhen specified, it should match one of the container or initContainer\nnames in the pod template." type: "string" operator: - description: "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: \n - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied." + description: "Represents the relationship between the container exit code(s) and the\nspecified values. Containers completed with success (exit code 0) are\nexcluded from the requirement check. Possible values are:\n\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should\nreact to an unknown operator by assuming the requirement is not satisfied." type: "string" values: - description: "Specifies the set of values. 
Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed." + description: "Specifies the set of values. Each returned container exit code (might be\nmultiple in case of multiple containers) is checked against this set of\nvalues with respect to the operator. The list of values must be ordered\nand must not contain duplicates. Value '0' cannot be used for the In operator.\nAt least one element is required. At most 255 elements are allowed." items: format: "int32" type: "integer" @@ -165,15 +169,15 @@ spec: - "values" type: "object" onPodConditions: - description: "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed." + description: "Represents the requirement on the pod conditions. The requirement is represented\nas a list of pod condition patterns. The requirement is satisfied if at\nleast one pattern matches an actual pod condition. At most 20 elements are allowed." items: - description: "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type." + description: "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching\nan actual pod condition type." properties: status: - description: "Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True." + description: "Specifies the required Pod condition status. To match a pod condition\nit is required that the specified status equals the pod condition status.\nDefaults to True." type: "string" type: - description: "Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type." + description: "Specifies the required Pod condition type. To match a pod condition\nit is required that specified type equals the pod condition type." type: "string" required: - "status" @@ -190,24 +194,24 @@ spec: - "rules" type: "object" podReplacementPolicy: - description: "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods when they are terminating (has a metadata.deletionTimestamp) or failed. - Failed means to wait until a previously created Pod is fully terminated (has phase Failed or Succeeded) before creating a replacement Pod. \n When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default." 
+ description: "podReplacementPolicy specifies when to create replacement Pods.\nPossible values are:\n- TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\n\nWhen using podFailurePolicy, Failed is the only allowed value.\nTerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.\nThis is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle.\nThis is on by default." type: "string" selector: - description: "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + description: "A label query over pods that should match the pod count.\nNormally, the system sets this field for you.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -219,18 +223,18 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" suspend: - description: "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. 
Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false." + description: "suspend specifies whether the Job controller should create Pods or not. If\na Job is created with suspend set to true, no Pods are created by the Job\ncontroller. If a Job is suspended after creation (i.e. the flag goes from\nfalse to true), the Job controller will delete all active Pods associated\nwith this Job. Users must design their workload to gracefully handle this.\nSuspending a Job will reset the StartTime field of the Job, effectively\nresetting the ActiveDeadlineSeconds timer too. Defaults to false." type: "boolean" template: - description: "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + description: "Describes the pod that will be created when executing a job.\nThe only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\".\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" properties: metadata: - description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + description: "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" properties: annotations: additionalProperties: @@ -250,10 +254,10 @@ spec: type: "string" type: "object" spec: - description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + description: "Specification of the desired behavior of the pod.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" properties: activeDeadlineSeconds: - description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer." + description: "Optional duration in seconds the pod may be active on the node relative to\nStartTime before the system will actively try to mark it failed and kill associated containers.\nValue must be a positive integer." format: "int64" type: "integer" affinity: @@ -263,9 +267,9 @@ spec: description: "Describes node affinity scheduling rules for the pod." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred." 
+ description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." items: - description: "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." + description: "An empty preferred scheduling term matches all objects with implicit weight 0\n(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." properties: preference: description: "A node selector term, associated with the corresponding weight." @@ -273,16 +277,16 @@ spec: matchExpressions: description: "A list of node selector requirements by node's labels." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -294,16 +298,16 @@ spec: matchFields: description: "A list of node selector requirements by node's fields." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -324,26 +328,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node." + description: "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node." properties: nodeSelectorTerms: description: "Required. A list of node selector terms. The terms are ORed." items: - description: "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm." + description: "A null or empty node selector term matches no objects. The requirements of\nthem are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." properties: matchExpressions: description: "A list of node selector requirements by node's labels." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." 
items: type: "string" type: "array" @@ -355,16 +359,16 @@ spec: matchFields: description: "A list of node selector requirements by node's fields." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -385,7 +389,7 @@ spec: description: "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred." + description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." items: description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" properties: @@ -393,21 +397,21 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." 
+ description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -419,38 +423,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. 
The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -462,23 +466,23 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" type: "object" weight: - description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100." + description: "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." format: "int32" type: "integer" required: @@ -487,26 +491,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied." 
+ description: "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." items: - description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" + description: "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key matches that of any node on which\na pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -518,38 +522,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." 
type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." 
+ description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -561,17 +565,17 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." 
+ description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" @@ -582,7 +586,7 @@ spec: description: "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred." + description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." items: description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" properties: @@ -590,21 +594,21 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -616,38 +620,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -659,23 +663,23 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" type: "object" weight: - description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100." + description: "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." format: "int32" type: "integer" required: @@ -684,26 +688,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied." + description: "If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." 
items: - description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" + description: "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key matches that of any node on which\na pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -715,38 +719,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." 
+ description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -758,17 +762,17 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" @@ -780,22 +784,22 @@ spec: description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted." type: "boolean" containers: - description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. 
Cannot be updated." + description: "List of containers belonging to the pod.\nContainers cannot currently be added or removed.\nThere must be at least one container in a Pod.\nCannot be updated." items: description: "A single application container that you want to run within a pod." properties: args: - description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -803,7 +807,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." 
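# Sketch of the command/args expansion rules quoted above, using assumed values.
# "$(GREETING)" is expanded from the container's environment if defined (and left
# unchanged otherwise), while the escaped "$$(GREETING)" always produces the
# literal string "$(GREETING)".
command: ["/bin/printer"]                      # overrides the image ENTRYPOINT
args:
  - "$(GREETING)"                              # expanded from the container environment
  - "$$(GREETING)"                             # escaped: passed through literally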
type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -815,7 +819,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -825,7 +829,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -838,7 +842,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -864,7 +868,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -879,7 +883,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -887,7 +891,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -901,7 +905,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -911,22 +915,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets." + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: - description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated." 
+ description: "Actions that the management system should take in response to container lifecycle events.\nCannot be updated." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -935,7 +939,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -943,7 +947,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -960,10 +964,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -979,7 +983,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. 
There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -988,20 +992,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -1010,7 +1014,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." 
+ description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1018,7 +1022,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1035,10 +1039,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -1054,7 +1058,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -1063,7 +1067,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -1071,19 +1075,19 @@ spec: type: "object" type: "object" livenessProbe: - description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. 
The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1094,7 +1098,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1103,7 +1107,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1111,7 +1115,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1128,24 +1132,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." 
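# Sketch of an HTTP liveness probe using the fields described above; the
# /healthz path and the named port "http" are assumed examples.
livenessProbe:
  httpGet:
    path: /healthz
    port: http                                 # IANA_SVC_NAME of a declared containerPort
    scheme: HTTP                               # default scheme when omitted
  initialDelaySeconds: 10
  periodSeconds: 10                            # default probe period
  failureThreshold: 3                          # default; container restarts after 3 consecutive failures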
format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1158,45 +1162,45 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated." + description: "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." type: "string" ports: - description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated." + description: "List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." items: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -1207,19 +1211,19 @@ spec: - "protocol" x-kubernetes-list-type: "map" readinessProbe: - description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. 
The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1230,7 +1234,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1239,7 +1243,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1247,7 +1251,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1264,24 +1268,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." 
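# Sketch of a readiness probe per the schema above, using a TCP check instead of
# HTTP; the port number 5432 is an assumed example.
readinessProbe:
  tcpSocket:
    port: 5432                                 # number in 1-65535, or a named port
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 1                            # default timeout
  successThreshold: 1                          # may exceed 1 for readiness, unlike liveness/startup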
format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1294,17 +1298,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -1314,10 +1318,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." 
+ description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -1326,15 +1330,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -1350,7 +1354,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -1359,20 +1363,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. 
Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + description: "RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." type: "string" securityContext: - description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + description: "SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." 
+ description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -1388,27 +1392,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -1424,48 +1428,48 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. 
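As a rough illustration of how several of the container-level securityContext fields described here combine (values are illustrative, not recommendations):

  securityContext:
    runAsNonRoot: true
    runAsUser: 1000                 # overrides any UID set in the PodSecurityContext
    runAsGroup: 3000
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop: ["ALL"]                 # drop the runtime's default capability set
    seccompProfile:
      type: RuntimeDefault          # Localhost and Unconfined are the other valid types

None of these fields apply when spec.os.name is windows; the windowsOptions block below covers that case.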
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" startupProbe: - description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1476,7 +1480,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1485,7 +1489,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1493,7 +1497,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." 
type: "string" value: description: "The header field value" @@ -1510,24 +1514,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1540,34 +1544,34 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. 
Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." + description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. 
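A sketch of a startupProbe using the httpGet and threshold fields described above (the /healthz path and port 8080 are assumptions, not part of the schema):

  startupProbe:
    httpGet:
      path: /healthz                # hypothetical endpoint
      port: 8080
      scheme: HTTP                  # the default
    failureThreshold: 30            # with periodSeconds: 10, allows roughly five minutes to start
    periodSeconds: 10
    timeoutSeconds: 1

Liveness and readiness probes are held back until the startup probe succeeds, which is why a generous failureThreshold is the usual way to accommodate slow-starting workloads.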
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -1586,27 +1590,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." 
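The subPath and subPathExpr fields are easiest to see in a concrete mount list; a sketch, assuming volumes named config and data and a POD_NAME environment variable populated elsewhere in the container spec:

  volumeMounts:
    - name: config                  # must match a Volume name in the pod spec
      mountPath: /etc/app
      readOnly: true
    - name: data
      mountPath: /var/lib/app
      subPathExpr: $(POD_NAME)      # expanded from the container's environment; mutually exclusive with subPath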
type: "string" required: - "mountPath" @@ -1614,22 +1618,22 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" dnsConfig: - description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy." + description: "Specifies the DNS parameters of a pod.\nParameters specified here will be merged to the generated DNS\nconfiguration based on DNSPolicy." properties: nameservers: - description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed." + description: "A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed." items: type: "string" type: "array" options: - description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy." + description: "A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy." items: description: "PodDNSConfigOption defines DNS resolver options of a pod." properties: @@ -1641,34 +1645,34 @@ spec: type: "object" type: "array" searches: - description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed." + description: "A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed." items: type: "string" type: "array" type: "object" dnsPolicy: - description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'." + description: "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'." type: "string" enableServiceLinks: - description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true." + description: "EnableServiceLinks indicates whether information about services should be injected into pod's\nenvironment variables, matching the syntax of Docker links.\nOptional: Defaults to true." 
type: "boolean" ephemeralContainers: - description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." + description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." items: - description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a Pod is\nremoved or restarted. The kubelet may evict a Pod if an ephemeral container causes the\nPod to exceed its resource allocation.\n\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing\nPod. Ephemeral containers may not be removed or restarted." properties: args: - description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -1676,7 +1680,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -1688,7 +1692,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
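The $(VAR_NAME) expansion rules described for args, command, and env values can be summarized with a small env list (variable names are illustrative):

  env:
    - name: APP_PORT
      value: "8080"
    - name: APP_URL
      value: "http://localhost:$(APP_PORT)"   # expands to http://localhost:8080
    - name: LITERAL
      value: "$$(APP_PORT)"                   # $$ escapes the syntax; the container sees $(APP_PORT)
    - name: MISSING
      value: "$(UNDEFINED_VAR)"               # unresolved references are left unchanged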
type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -1698,7 +1702,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -1711,7 +1715,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -1737,7 +1741,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -1752,7 +1756,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -1760,7 +1764,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -1774,7 +1778,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -1784,22 +1788,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images" + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images" type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: description: "Lifecycle is not allowed for ephemeral containers." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -1808,7 +1812,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." 
type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1816,7 +1820,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1833,10 +1837,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -1852,7 +1856,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -1861,20 +1865,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). 
Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -1883,7 +1887,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1891,7 +1895,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1908,10 +1912,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -1927,7 +1931,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -1936,7 +1940,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." 
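Lifecycle handlers are rejected on ephemeral containers, but on a regular container the postStart and preStop semantics described here could be exercised like this (paths and commands are placeholders):

  lifecycle:
    postStart:
      exec:
        command: ["/bin/sh", "-c", "echo started > /tmp/ready"]   # a shell is used only because it is invoked explicitly
    preStop:
      httpGet:
        path: /quitquitquit         # hypothetical drain endpoint
        port: 8080

Because the termination grace period countdown begins before preStop runs, the hook should finish well within terminationGracePeriodSeconds.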
+ description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -1950,13 +1954,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1967,7 +1971,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1976,7 +1980,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1984,7 +1988,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2001,24 +2005,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." 
+ description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2031,22 +2035,22 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers." + description: "Name of the ephemeral container specified as a DNS_LABEL.\nThis name must be unique among all containers, init containers and ephemeral containers." type: "string" ports: description: "Ports are not allowed for ephemeral containers." @@ -2054,22 +2058,22 @@ spec: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -2086,13 +2090,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." 
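Ports are likewise disallowed on ephemeral containers, but the ContainerPort fields described above are the same ones used on regular containers, for example:

  ports:
    - name: http                    # an IANA_SVC_NAME, unique within the pod
      containerPort: 8080           # must satisfy 0 < x < 65536
      protocol: TCP                 # the default
    - name: metrics
      containerPort: 9090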
+ description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2103,7 +2107,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2112,7 +2116,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2120,7 +2124,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2137,24 +2141,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." 
format: "int32" type: "integer" tcpSocket: @@ -2167,17 +2171,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -2187,10 +2191,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." + description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -2199,15 +2203,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod." 
+ description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources\nalready allocated to the pod." properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -2223,7 +2227,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -2232,20 +2236,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers." + description: "Restart policy for the container to manage the restart behavior of each\ncontainer within a pod.\nThis may only be set for init containers. You cannot set this field on\nephemeral containers." type: "string" securityContext: - description: "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." 
+ description: "Optional: SecurityContext defines the security options the ephemeral container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." + description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -2261,27 +2265,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -2297,31 +2301,31 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. 
Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." 
+ description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" @@ -2332,13 +2336,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2349,7 +2353,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2358,7 +2362,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2366,7 +2370,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2383,24 +2387,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2413,37 +2417,37 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" targetContainerName: - description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + description: "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\n\nThe container runtime must implement support for this feature. If the runtime does not\nsupport namespace targeting then the result of setting this field is undefined." type: "string" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." 
+ description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -2462,27 +2466,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." 
type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." type: "string" required: - "mountPath" @@ -2490,16 +2494,16 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" hostAliases: - description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods." + description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\nfile if specified. This is only valid for non-hostNetwork pods." items: - description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file." + description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file." properties: hostnames: description: "Hostnames for the above IP address." @@ -2512,48 +2516,48 @@ spec: type: "object" type: "array" hostIPC: - description: "Use the host's ipc namespace. Optional: Default to false." + description: "Use the host's ipc namespace.\nOptional: Default to false." type: "boolean" hostNetwork: - description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false." + description: "Host networking requested for this pod. Use the host's network namespace.\nIf this option is set, the ports that will be used must be specified.\nDefault to false." type: "boolean" hostPID: - description: "Use the host's pid namespace. Optional: Default to false." + description: "Use the host's pid namespace.\nOptional: Default to false." type: "boolean" hostUsers: - description: "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature." 
+ description: "Use the host's user namespace.\nOptional: Default to true.\nIf set to true or not present, the pod will be run in the host user namespace, useful\nfor when the pod needs a feature only available to the host user namespace, such as\nloading a kernel module with CAP_SYS_MODULE.\nWhen set to false, a new userns is created for the pod. Setting false is useful for\nmitigating container breakout vulnerabilities even allowing users to run their\ncontainers as root without actually having root privileges on the host.\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature." type: "boolean" hostname: - description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value." + description: "Specifies the hostname of the Pod\nIf not specified, the pod's hostname will be set to a system-defined value." type: "string" imagePullSecrets: - description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" items: - description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace." + description: "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" type: "array" initContainers: - description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" + description: "List of initialization containers belonging to the pod.\nInit containers are executed in order prior to containers being started. 
If any\ninit container fails, the pod is considered to have failed and is handled according\nto its restartPolicy. The name for an init container or normal container must be\nunique among all containers.\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\nThe resourceRequirements of an init container are taken into account during scheduling\nby finding the highest request/limit for each resource type, and then using the max of\nof that value or the sum of the normal containers. Limits are applied to init containers\nin a similar fashion.\nInit containers cannot currently be added or removed.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" items: description: "A single application container that you want to run within a pod." properties: args: - description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. 
Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -2561,7 +2565,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -2573,7 +2577,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -2583,7 +2587,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -2596,7 +2600,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." 
properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -2622,7 +2626,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -2637,7 +2641,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -2645,7 +2649,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -2659,7 +2663,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -2669,22 +2673,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets." + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: - description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated." + description: "Actions that the management system should take in response to container lifecycle events.\nCannot be updated." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -2693,7 +2697,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2701,7 +2705,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2718,10 +2722,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." 
+ description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -2737,7 +2741,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -2746,20 +2750,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." 
+ description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -2768,7 +2772,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2776,7 +2780,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2793,10 +2797,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -2812,7 +2816,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -2821,7 +2825,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -2829,19 +2833,19 @@ spec: type: "object" type: "object" livenessProbe: - description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2852,7 +2856,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2861,7 +2865,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2869,7 +2873,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2886,24 +2890,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2916,45 +2920,45 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated." + description: "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." type: "string" ports: - description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated." + description: "List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." items: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -2965,19 +2969,19 @@ spec: - "protocol" x-kubernetes-list-type: "map" readinessProbe: - description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2988,7 +2992,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2997,7 +3001,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -3005,7 +3009,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -3022,24 +3026,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -3052,17 +3056,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -3072,10 +3076,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." + description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -3084,15 +3088,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -3108,7 +3112,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -3117,20 +3121,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + description: "RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." type: "string" securityContext: - description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + description: "SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." + description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -3146,27 +3150,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -3182,48 +3186,48 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." 
+ description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" startupProbe: - description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -3234,7 +3238,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -3243,7 +3247,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." 
type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -3251,7 +3255,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -3268,24 +3272,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -3298,34 +3302,34 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." 
+ description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -3344,27 +3348,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." type: "string" required: - "mountPath" @@ -3372,26 +3376,26 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" nodeName: - description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements." + description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty,\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\nrequirements." type: "string" nodeSelector: additionalProperties: type: "string" - description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + description: "NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" type: "object" x-kubernetes-map-type: "atomic" os: - description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + description: "Specifies the OS of the containers in the pod.\nSome pod and container fields are restricted if this is set.\n\n\nIf the OS field is set to linux, the following fields must be unset:\n-securityContext.windowsOptions\n\n\nIf the OS field is set to windows, following fields must be unset:\n- spec.hostPID\n- spec.hostIPC\n- spec.hostUsers\n- spec.securityContext.seLinuxOptions\n- spec.securityContext.seccompProfile\n- spec.securityContext.fsGroup\n- spec.securityContext.fsGroupChangePolicy\n- spec.securityContext.sysctls\n- spec.shareProcessNamespace\n- spec.securityContext.runAsUser\n- spec.securityContext.runAsGroup\n- spec.securityContext.supplementalGroups\n- spec.containers[*].securityContext.seLinuxOptions\n- spec.containers[*].securityContext.seccompProfile\n- spec.containers[*].securityContext.capabilities\n- spec.containers[*].securityContext.readOnlyRootFilesystem\n- spec.containers[*].securityContext.privileged\n- spec.containers[*].securityContext.allowPrivilegeEscalation\n- spec.containers[*].securityContext.procMount\n- spec.containers[*].securityContext.runAsUser\n- spec.containers[*].securityContext.runAsGroup" properties: name: - description: "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null" + description: "Name is the name of the operating system. The currently supported values are linux and windows.\nAdditional value may be defined in future and can be one of:\nhttps://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration\nClients should expect to handle additional values and treat unrecognized values in this field as os: null" type: "string" required: - "name" @@ -3403,20 +3407,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" + description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" type: "object" preemptionPolicy: - description: "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset." + description: "PreemptionPolicy is the Policy for preempting pods with lower priority.\nOne of Never, PreemptLowerPriority.\nDefaults to PreemptLowerPriority if unset." type: "string" priority: - description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority." + description: "The priority value. Various system components use this field to find the\npriority of the pod. When Priority Admission Controller is enabled, it\nprevents users from setting this field. The admission controller populates\nthis field from PriorityClassName.\nThe higher the value, the higher the priority." format: "int32" type: "integer" priorityClassName: - description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default." + description: "If specified, indicates the pod's priority. \"system-node-critical\" and\n\"system-cluster-critical\" are two special keywords which indicate the\nhighest priorities with the former being the highest priority. Any other\nname must be defined by creating a PriorityClass object with that name.\nIf not specified, the pod priority will be default or zero if there is no\ndefault." type: "string" readinessGates: - description: "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" + description: "If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" items: description: "PodReadinessGate contains the reference to a pod condition" properties: @@ -3428,21 +3432,21 @@ spec: type: "object" type: "array" resourceClaims: - description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + description: "ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable." items: - description: "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name." + description: "PodResourceClaim references exactly one ResourceClaim through a ClaimSource.\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod.\nContainers that need access to the ResourceClaim reference it with this name." properties: name: - description: "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL." + description: "Name uniquely identifies this resource claim inside the pod.\nThis must be a DNS_LABEL." type: "string" source: description: "Source describes where to find the ResourceClaim." properties: resourceClaimName: - description: "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod." + description: "ResourceClaimName is the name of a ResourceClaim object in the same\nnamespace as this pod." type: "string" resourceClaimTemplateName: - description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate\nobject in the same namespace as this pod.\n\n\nThe template will be used to create a new ResourceClaim, which will\nbe bound to this pod. When this pod is deleted, the ResourceClaim\nwill also be deleted. 
The pod name and resource name, along with a\ngenerated component, will be used to form a unique name for the\nResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\n\nThis field is immutable and no changes will be made to the\ncorresponding ResourceClaim by the control plane after creating the\nResourceClaim." type: "string" type: "object" required: @@ -3453,21 +3457,21 @@ spec: - "name" x-kubernetes-list-type: "map" restartPolicy: - description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy" + description: "Restart policy for all containers within the pod.\nOne of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.\nDefault to Always.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy" type: "string" runtimeClassName: - description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class" + description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class" type: "string" schedulerName: - description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler." + description: "If specified, the pod will be dispatched by specified scheduler.\nIf not specified, the pod will be dispatched by default scheduler." type: "string" schedulingGates: - description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. \n SchedulingGates can only be set at pod creation time, and be removed only afterwards. \n This is a beta feature enabled by the PodSchedulingReadiness feature gate." + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate." items: description: "PodSchedulingGate is associated to a Pod to guard its scheduling." properties: name: - description: "Name of the scheduling gate. Each scheduling gate must have a unique name field." + description: "Name of the scheduling gate.\nEach scheduling gate must have a unique name field." 
type: "string" required: - "name" @@ -3477,28 +3481,28 @@ spec: - "name" x-kubernetes-list-type: "map" securityContext: - description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + description: "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field." properties: fsGroup: - description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + description: "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" fsGroupChangePolicy: - description: "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows." + description: "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\nNote that this field cannot be set when spec.os.name is windows." type: "string" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. 
If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -3514,25 +3518,25 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. 
Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" supplementalGroups: - description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows." + description: "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." items: format: "int64" type: "integer" type: "array" sysctls: - description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows." + description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." items: description: "Sysctl defines a kernel parameter to be set" properties: @@ -3548,85 +3552,85 @@ spec: type: "object" type: "array" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." 
type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" serviceAccount: - description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead." + description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName.\nDeprecated: Use serviceAccountName instead." type: "string" serviceAccountName: - description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" type: "string" setHostnameAsFQDN: - description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false." + description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).\nIn Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).\nIn Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN.\nIf a pod does not have FQDN, this has no effect.\nDefault to false." type: "boolean" shareProcessNamespace: - description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false." 
+ description: "Share a single process namespace between all of the containers in a pod.\nWhen this is set containers will be able to view and signal processes from other containers\nin the same pod, and the first process in each container will not be assigned PID 1.\nHostPID and ShareProcessNamespace cannot both be set.\nOptional: Default to false." type: "boolean" subdomain: - description: "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all." + description: "If specified, the fully qualified Pod hostname will be \"...svc.\".\nIf not specified, the pod will not have a domainname at all." type: "string" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds." + description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nIf this value is nil, the default grace period will be used instead.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nDefaults to 30 seconds." format: "int64" type: "integer" tolerations: description: "If specified, the pod's tolerations." items: - description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator ." + description: "The pod this Toleration is attached to tolerates any taint that matches\nthe triple using the matching operator ." properties: effect: - description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + description: "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." type: "string" key: - description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys." + description: "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." type: "string" operator: - description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category." + description: "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. 
Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." type: "string" tolerationSeconds: - description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system." + description: "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." format: "int64" type: "integer" value: - description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string." + description: "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." type: "string" type: "object" type: "array" topologySpreadConstraints: - description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed." + description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\nAll topologySpreadConstraints are ANDed." items: description: "TopologySpreadConstraint specifies how to spread matching pods among the given topology." properties: labelSelector: - description: "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain." + description: "LabelSelector is used to find matching pods.\nPods that match this label selector are counted to determine the number of pods\nin their corresponding topology domain." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
items: type: "string" type: "array" @@ -3638,35 +3642,35 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + description: "MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nMatchLabelKeys cannot be set when LabelSelector isn't set.\nKeys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" maxSkew: - description: "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed." 
+ description: "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nThe global minimum is the minimum number of matching pods in an eligible domain\nor zero if the number of eligible domains is less than MinDomains.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 2/2/1:\nIn this case, the global minimum is 1.\n| zone1 | zone2 | zone3 |\n| P P | P P | P |\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\nviolate MaxSkew(1).\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed." format: "int32" type: "integer" minDomains: - description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." 
+ description: "MinDomains indicates a minimum number of eligible domains.\nWhen the number of eligible domains with matching topology keys is less than minDomains,\nPod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed.\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\nthis value has no effect on scheduling.\nAs a result, when the number of eligible domains is less than minDomains,\nscheduler won't schedule more than maxSkew Pods to those domains.\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\nValid values are integers greater than 0.\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\nlabelSelector spread as 2/2/2:\n| zone1 | zone2 | zone3 |\n| P P | P P | P P |\nThe number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0.\nIn this situation, new pod with the same labelSelector cannot be scheduled,\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\nit will violate MaxSkew.\n\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." format: "int32" type: "integer" nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\nwhen calculating pod topology spread skew. Options are:\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\n- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\n\nIf this value is nil, the behavior is equivalent to the Honor policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: "string" nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating\npod topology spread skew. Options are:\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\nhas a toleration, are included.\n- Ignore: node taints are ignored. All nodes are included.\n\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: "string" topologyKey: - description: "TopologyKey is the key of node labels. 
Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field." + description: "TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nWe define a domain as a particular instance of a topology.\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\nnodeAffinityPolicy and nodeTaintsPolicy.\ne.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology.\nAnd, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology.\nIt's a required field." type: "string" whenUnsatisfiable: - description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field." + description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\nthe spread constraint.\n- DoNotSchedule (default) tells the scheduler not to schedule it.\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod\nif and only if every possible node assignment for that pod would violate\n\"MaxSkew\" on some topology.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 3/1/1:\n| zone1 | zone2 | zone3 |\n| P P P | P | P |\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\nMaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler\nwon't make it *more* imbalanced.\nIt's a required field." type: "string" required: - "maxSkew" @@ -3679,25 +3683,25 @@ spec: - "whenUnsatisfiable" x-kubernetes-list-type: "map" volumes: - description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes" + description: "List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" items: description: "Volume represents a named volume in a pod that may be accessed by any container in the pod." properties: awsElasticBlockStore: - description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" properties: fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" partition: - description: "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." + description: "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." format: "int32" type: "integer" readOnly: - description: "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" type: "boolean" volumeID: - description: "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" type: "string" required: - "volumeID" @@ -3715,13 +3719,13 @@ spec: description: "diskURI is the URI of data disk in the blob storage" type: "string" fsType: - description: "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
+ description: "fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" kind: description: "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" type: "string" readOnly: - description: "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" required: - "diskName" @@ -3731,7 +3735,7 @@ spec: description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod." properties: readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretName: description: "secretName is the name of secret that contains Azure Storage Account Name and Key" @@ -3747,7 +3751,7 @@ spec: description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" properties: monitors: - description: "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" items: type: "string" type: "array" @@ -3755,44 +3759,44 @@ spec: description: "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /" type: "string" readOnly: - description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "boolean" secretFile: - description: "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "string" secretRef: - description: "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. 
apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" user: - description: "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "string" required: - "monitors" type: "object" cinder: - description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "boolean" secretRef: - description: "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack." + description: "secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" volumeID: - description: "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "string" required: - "volumeID" @@ -3801,11 +3805,11 @@ spec: description: "configMap represents a configMap that should populate this volume" properties: defaultMode: - description: "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." 
+ description: "defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: - description: "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -3813,11 +3817,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -3825,7 +3829,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "optional specify whether the ConfigMap or its keys must be defined" @@ -3836,26 +3840,26 @@ spec: description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." properties: driver: - description: "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster." + description: "driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." type: "string" fsType: - description: "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply." + description: "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply." type: "string" nodePublishSecretRef: - description: "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." + description: "nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" readOnly: - description: "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)." + description: "readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write)." type: "boolean" volumeAttributes: additionalProperties: type: "string" - description: "volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values." + description: "volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." type: "object" required: - "driver" @@ -3864,7 +3868,7 @@ spec: description: "downwardAPI represents downward API about the pod that should populate this volume" properties: defaultMode: - description: "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits to use on created files by default. Must be a\nOptional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: @@ -3886,14 +3890,14 @@ spec: type: "object" x-kubernetes-map-type: "atomic" mode: - description: "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" type: "string" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -3918,27 +3922,27 @@ spec: type: "array" type: "object" emptyDir: - description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" properties: medium: - description: "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "medium represents what type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" type: "string" sizeLimit: anyOf: - type: "integer" - type: "string" - description: "sizeLimit is the total amount of local storage required for this EmptyDir volume. 
The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true type: "object" ephemeral: - description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + description: "ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time." properties: volumeClaimTemplate: - description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. 
Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. \n Required, must not be nil." + description: "Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `-` where\n`` is the name from the `PodSpec.Volumes` array\nentry. Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil." properties: metadata: - description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." + description: "May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation." properties: annotations: additionalProperties: @@ -3958,18 +3962,18 @@ spec: type: "string" type: "object" spec: - description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here." + description: "The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here." properties: accessModes: - description: "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + description: "accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" items: type: "string" type: "array" dataSource: - description: "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource." + description: "dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource." properties: apiGroup: - description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required." + description: "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." type: "string" kind: description: "Kind is the type of resource being referenced" @@ -3983,10 +3987,10 @@ spec: type: "object" x-kubernetes-map-type: "atomic" dataSourceRef: - description: "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + description: "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. 
This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." properties: apiGroup: - description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required." + description: "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." type: "string" kind: description: "Kind is the type of resource being referenced" @@ -3995,14 +3999,14 @@ spec: description: "Name is the name of resource being referenced" type: "string" namespace: - description: "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + description: "Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." type: "string" required: - "kind" - "name" type: "object" resources: - description: "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + description: "resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" properties: limits: additionalProperties: @@ -4011,7 +4015,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -4020,7 +4024,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" selector: @@ -4029,16 +4033,16 @@ spec: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
items: type: "string" type: "array" @@ -4050,18 +4054,18 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" storageClassName: - description: "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" + description: "storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" type: "string" volumeAttributesClassName: - description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." + description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\nIf specified, the CSI driver will create or update the volume with the attributes defined\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\nit can be changed after the claim is created. An empty string value means that no VolumeAttributesClass\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\nwill be set by the persistentvolume controller if it exists.\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\nexists.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." type: "string" volumeMode: - description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec." 
+ description: "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec." type: "string" volumeName: description: "volumeName is the binding reference to the PersistentVolume backing this claim." @@ -4075,14 +4079,14 @@ spec: description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" lun: description: "lun is Optional: FC target lun number" format: "int32" type: "integer" readOnly: - description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" targetWWNs: description: "targetWWNs is Optional: FC target worldwide names (WWNs)" @@ -4090,19 +4094,19 @@ spec: type: "string" type: "array" wwids: - description: "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + description: "wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." items: type: "string" type: "array" type: "object" flexVolume: - description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." + description: "flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." properties: driver: description: "driver is the name of the driver to use for this volume." type: "string" fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." type: "string" options: additionalProperties: @@ -4110,13 +4114,13 @@ spec: description: "options is Optional: this field holds extra command options if any." type: "object" readOnly: - description: "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." 
+ description: "secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" @@ -4127,36 +4131,36 @@ spec: description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" properties: datasetName: - description: "datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated" + description: "datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated" type: "string" datasetUUID: description: "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset" type: "string" type: "object" gcePersistentDisk: - description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" properties: fsType: - description: "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" partition: - description: "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" format: "int32" type: "integer" pdName: - description: "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" type: "string" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" type: "boolean" required: - "pdName" type: "object" gitRepo: - description: "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." + description: "gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." properties: directory: - description: "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name." + description: "directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name." type: "string" repository: description: "repository is the URL" @@ -4168,35 +4172,35 @@ spec: - "repository" type: "object" glusterfs: - description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md" + description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md" properties: endpoints: - description: "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "string" path: - description: "path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "string" readOnly: - description: "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "boolean" required: - "endpoints" - "path" type: "object" hostPath: - description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write." + description: "hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write." properties: path: - description: "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + description: "path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" type: "string" type: - description: "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + description: "type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" type: "string" required: - "path" type: "object" iscsi: - description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" + description: "iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md" properties: chapAuthDiscovery: description: "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication" @@ -4205,39 +4209,39 @@ spec: description: "chapAuthSession defines whether support iSCSI Session CHAP authentication" type: "boolean" fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" initiatorName: - description: "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection." + description: "initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n: will be created for the connection." type: "string" iqn: description: "iqn is the target iSCSI Qualified Name." type: "string" iscsiInterface: - description: "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp)." + description: "iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp)." type: "string" lun: description: "lun represents iSCSI Target Lun number." format: "int32" type: "integer" portals: - description: "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)." + description: "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." items: type: "string" type: "array" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false." + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false." type: "boolean" secretRef: description: "secretRef is the CHAP Secret for iSCSI target and initiator authentication" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" targetPortal: - description: "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)." + description: "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." type: "string" required: - "iqn" @@ -4245,32 +4249,32 @@ spec: - "targetPortal" type: "object" name: - description: "name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + description: "name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" type: "string" nfs: - description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" properties: path: - description: "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "string" readOnly: - description: "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "boolean" server: - description: "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "string" required: - "path" - "server" type: "object" persistentVolumeClaim: - description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + description: "persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" properties: claimName: - description: "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + description: "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" type: "string" readOnly: - description: "readOnly Will force the ReadOnly setting in VolumeMounts. Default false." + description: "readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false." type: "boolean" required: - "claimName" @@ -4279,7 +4283,7 @@ spec: description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
type: "string" pdID: description: "pdID is the ID that identifies Photon Controller persistent disk" @@ -4291,10 +4295,10 @@ spec: description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine" properties: fsType: - description: "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" volumeID: description: "volumeID uniquely identifies a Portworx volume" @@ -4306,7 +4310,7 @@ spec: description: "projected items for all in one resources secrets, configmaps, and downward API" properties: defaultMode: - description: "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" sources: @@ -4315,24 +4319,24 @@ spec: description: "Projection that may be projected along with other supported volume types" properties: clusterTrustBundle: - description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. \n Alpha, gated by the ClusterTrustBundleProjection feature gate. \n ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. \n Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time." + description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\nof ClusterTrustBundle objects in an auto-updating file.\n\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\n\nClusterTrustBundle objects can either be selected by name, or by the\ncombination of signer name and a label selector.\n\n\nKubelet performs aggressive normalization of the PEM contents written\ninto the pod filesystem. Esoteric PEM features such as inter-block\ncomments and block headers are stripped. 
Certificates are deduplicated.\nThe ordering of certificates within the file is arbitrary, and Kubelet\nmay change the order over time." properties: labelSelector: - description: "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\"." + description: "Select all ClusterTrustBundles that match this label selector. Only has\neffect if signerName is set. Mutually-exclusive with name. If unset,\ninterpreted as \"match nothing\". If set but empty, interpreted as \"match\neverything\"." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -4344,21 +4348,21 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" name: - description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector." + description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive\nwith signerName and labelSelector." type: "string" optional: - description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles." + description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s)\naren't available. If using name, then the named ClusterTrustBundle is\nallowed not to exist. 
If using signerName, then the combination of\nsignerName and labelSelector is allowed to match zero\nClusterTrustBundles." type: "boolean" path: description: "Relative path from the volume root to write the bundle." type: "string" signerName: - description: "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated." + description: "Select all ClusterTrustBundles that match this signer name.\nMutually-exclusive with name. The contents of all selected\nClusterTrustBundles will be unified and deduplicated." type: "string" required: - "path" @@ -4367,7 +4371,7 @@ spec: description: "configMap information about the configMap data to project" properties: items: - description: "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4375,11 +4379,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4387,7 +4391,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" 
+ description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "optional specify whether the ConfigMap or its keys must be defined" @@ -4416,14 +4420,14 @@ spec: type: "object" x-kubernetes-map-type: "atomic" mode: - description: "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" type: "string" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -4451,7 +4455,7 @@ spec: description: "secret information about the secret data to project" properties: items: - description: "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4459,11 +4463,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4471,7 +4475,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "optional field specify whether the Secret or its key must be defined" @@ -4482,14 +4486,14 @@ spec: description: "serviceAccountToken is information about the serviceAccountToken data to project" properties: audience: - description: "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver." + description: "audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver." type: "string" expirationSeconds: - description: "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes." + description: "expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes." format: "int64" type: "integer" path: - description: "path is the path relative to the mount point of the file to project the token into." + description: "path is the path relative to the mount point of the file to project the\ntoken into." 
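The projected volume fields described above (configMap items, per-file mode bits, and the serviceAccountToken source with audience, expirationSeconds and path) combine as in the following sketch of a Pod manifest. This is only an illustration: the ConfigMap name, audience, mount path and image are hypothetical, and decimal mode values are used to sidestep YAML octal-parsing ambiguity.

apiVersion: v1
kind: Pod
metadata:
  name: projected-volume-demo        # hypothetical name
spec:
  containers:
    - name: app
      image: busybox:1.36
      command: ["sleep", "3600"]
      volumeMounts:
        - name: bundle
          mountPath: /var/run/projected
          readOnly: true
  volumes:
    - name: bundle
      projected:
        defaultMode: 292             # decimal for 0444; JSON clients must use decimal
        sources:
          - configMap:
              name: app-config       # hypothetical ConfigMap in the same namespace
              optional: true         # volume setup does not fail if it is missing
              items:
                - key: settings.json
                  path: config/settings.json   # relative path, must not contain '..'
                  mode: 256          # decimal for 0400, overrides defaultMode
          - serviceAccountToken:
              audience: https://example.internal   # recipients must check this audience
              expirationSeconds: 3600              # at least 600; kubelet rotates the token proactively
              path: token                          # relative to the volume mount point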
type: "string" required: - "path" @@ -4501,19 +4505,19 @@ spec: description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime" properties: group: - description: "group to map volume access to Default is no group" + description: "group to map volume access to\nDefault is no group" type: "string" readOnly: - description: "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false." + description: "readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false." type: "boolean" registry: - description: "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes" + description: "registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" type: "string" tenant: - description: "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin" + description: "tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin" type: "string" user: - description: "user to map volume access to Defaults to serivceaccount user" + description: "user to map volume access to\nDefaults to serivceaccount user" type: "string" volume: description: "volume is a string that references an already created Quobyte volume by name." @@ -4523,38 +4527,38 @@ spec: - "volume" type: "object" rbd: - description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md" + description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md" properties: fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" image: - description: "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" keyring: - description: "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" monitors: - description: "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" items: type: "string" type: "array" pool: - description: "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "boolean" secretRef: - description: "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" user: - description: "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" required: - "image" @@ -4564,7 +4568,7 @@ spec: description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\"." type: "string" gateway: description: "gateway is the host address of the ScaleIO API Gateway." @@ -4573,13 +4577,13 @@ spec: description: "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage." type: "string" readOnly: - description: "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail." + description: "secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" @@ -4587,7 +4591,7 @@ spec: description: "sslEnabled Flag enable/disable SSL communication with Gateway, default false" type: "boolean" storageMode: - description: "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned." + description: "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned." type: "string" storagePool: description: "storagePool is the ScaleIO Storage Pool associated with the protection domain." @@ -4596,7 +4600,7 @@ spec: description: "system is the name of the storage system as configured in ScaleIO." type: "string" volumeName: - description: "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source." + description: "volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source." type: "string" required: - "gateway" @@ -4604,14 +4608,14 @@ spec: - "system" type: "object" secret: - description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + description: "secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" properties: defaultMode: - description: "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: - description: "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'." + description: "items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4619,11 +4623,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4634,38 +4638,38 @@ spec: description: "optional field specify whether the Secret or its keys must be defined" type: "boolean" secretName: - description: "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + description: "secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" type: "string" type: "object" storageos: description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." 
+ description: "secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" volumeName: - description: "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace." + description: "volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." type: "string" volumeNamespace: - description: "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created." + description: "volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created." type: "string" type: "object" vsphereVolume: description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" properties: fsType: - description: "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" storagePolicyID: description: "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName." @@ -4688,7 +4692,7 @@ spec: type: "object" type: "object" ttlSecondsAfterFinished: - description: "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes." + description: "ttlSecondsAfterFinished limits the lifetime of a Job that has finished\nexecution (either Complete or Failed). If this field is set,\nttlSecondsAfterFinished after the Job finishes, it is eligible to be\nautomatically deleted. When the Job is being deleted, its lifecycle\nguarantees (e.g. finalizers) will be honored. If this field is unset,\nthe Job won't be automatically deleted. 
If this field is set to zero,\nthe Job becomes eligible to be deleted immediately after it finishes." format: "int32" type: "integer" required: @@ -4703,8 +4707,23 @@ spec: x-kubernetes-list-map-keys: - "name" x-kubernetes-list-type: "map" + startupPolicy: + description: "StartupPolicy, if set, configures in what order jobs must be started" + properties: + startupPolicyOrder: + description: "StartupPolicyOrder determines the startup order of the ReplicatedJobs.\nAnyOrder means to start replicated jobs in any order.\nInOrder means to start them as they are listed in the JobSet. A ReplicatedJob is started only\nwhen all the jobs of the previous one are ready." + enum: + - "AnyOrder" + - "InOrder" + type: "string" + required: + - "startupPolicyOrder" + type: "object" + x-kubernetes-validations: + - message: "Value is immutable" + rule: "self == oldSelf" successPolicy: - description: "SuccessPolicy configures when to declare the JobSet as succeeded. The JobSet is always declared succeeded if all jobs in the set finished with status complete." + description: "SuccessPolicy configures when to declare the JobSet as\nsucceeded.\nThe JobSet is always declared succeeded if all jobs in the set\nfinished with status complete." properties: operator: description: "Operator determines either All or Any of the selected jobs should succeed to consider the JobSet successful" @@ -4713,7 +4732,7 @@ spec: - "Any" type: "string" targetReplicatedJobs: - description: "TargetReplicatedJobs are the names of the replicated jobs the operator will apply to. A null or empty list will apply to all replicatedJobs." + description: "TargetReplicatedJobs are the names of the replicated jobs the operator will apply to.\nA null or empty list will apply to all replicatedJobs." items: type: "string" type: "array" @@ -4733,23 +4752,23 @@ spec: properties: conditions: items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." 
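The startupPolicy block added above is a new JobSet field; the sketch below shows how it might sit alongside successPolicy and ttlSecondsAfterFinished in a jobset.x-k8s.io/v1alpha2 manifest. With InOrder, the second replicated job is started only once all jobs of the first are ready, and the whole startupPolicy block is validated as immutable. All names, images and commands here are hypothetical placeholders.

apiVersion: jobset.x-k8s.io/v1alpha2
kind: JobSet
metadata:
  name: ordered-startup-demo
spec:
  startupPolicy:
    startupPolicyOrder: InOrder      # start replicated jobs in the order they are listed
  successPolicy:
    operator: All                    # with no targetReplicatedJobs listed, applies to all replicated jobs
  replicatedJobs:
    - name: driver
      replicas: 1
      template:
        spec:
          template:
            spec:
              restartPolicy: Never
              containers:
                - name: driver
                  image: busybox:1.36
                  command: ["sh", "-c", "echo driver ready && sleep 30"]
    - name: workers                  # started only after all "driver" jobs are ready
      replicas: 2
      template:
        spec:
          ttlSecondsAfterFinished: 300   # finished Jobs become eligible for deletion after 5 minutes
          template:
            spec:
              restartPolicy: Never
              containers:
                - name: worker
                  image: busybox:1.36
                  command: ["sh", "-c", "echo working && sleep 10"]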
+ description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." maxLength: 32768 type: "string" observedGeneration: - description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance." + description: "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance." format: "int64" minimum: 0.0 type: "integer" reason: - description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty." + description: "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -4762,7 +4781,7 @@ spec: - "Unknown" type: "string" type: - description: "type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" + description: "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" maxLength: 316 pattern: "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$" type: "string" diff --git a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/admissionchecks.yaml b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/admissionchecks.yaml index aa2a0fa47..cc9bd8e72 100644 --- a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/admissionchecks.yaml +++ b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/admissionchecks.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.12.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "admissionchecks.kueue.x-k8s.io" spec: group: "kueue.x-k8s.io" @@ -19,10 +19,10 @@ spec: description: "AdmissionCheck is the Schema for the admissionchecks API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -30,7 +30,7 @@ spec: description: "AdmissionCheckSpec defines the desired state of AdmissionCheck" properties: controllerName: - description: "controllerName is name of the controller which will actually perform the checks. This is the name with which controller identifies with, not necessarily a K8S Pod or Deployment name. Cannot be empty." + description: "controllerName is name of the controller which will actually perform\nthe checks. This is the name with which controller identifies with,\nnot necessarily a K8S Pod or Deployment name. Cannot be empty." type: "string" parameters: description: "Parameters identifies the resource providing additional check parameters." @@ -51,7 +51,7 @@ spec: type: "object" retryDelayMinutes: default: 15 - description: "RetryDelayMinutes specifies how long to keep the workload suspended after a failed check (after it transitioned to False). 
After that the check state goes to \"Unknown\". The default is 15 min." + description: "RetryDelayMinutes specifies how long to keep the workload suspended\nafter a failed check (after it transitioned to False).\nAfter that the check state goes to \"Unknown\".\nThe default is 15 min." format: "int64" type: "integer" required: @@ -61,25 +61,25 @@ spec: description: "AdmissionCheckStatus defines the observed state of AdmissionCheck" properties: conditions: - description: "conditions hold the latest available observations of the AdmissionCheck current state." + description: "conditions hold the latest available observations of the AdmissionCheck\ncurrent state." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." maxLength: 32768 type: "string" observedGeneration: - description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance." + description: "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance." 
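For the AdmissionCheck schema above, a minimal manifest might look like the sketch below. The controller name and the referenced parameters object are hypothetical placeholders for whatever external admission-check controller is actually installed, and retryDelayMinutes simply overrides the 15-minute default.

apiVersion: kueue.x-k8s.io/v1beta1
kind: AdmissionCheck
metadata:
  name: budget-check                 # hypothetical name
spec:
  controllerName: example.com/budget-controller   # identifier the external controller registers with
  retryDelayMinutes: 30              # keep the workload suspended 30 minutes after a failed check
  parameters:                        # optional reference to controller-specific configuration
    apiGroup: example.com
    kind: BudgetCheckConfig
    name: team-a-budget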
format: "int64" minimum: 0.0 type: "integer" reason: - description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty." + description: "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -92,7 +92,7 @@ spec: - "Unknown" type: "string" type: - description: "type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" + description: "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" maxLength: 316 pattern: "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$" type: "string" diff --git a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/clusterqueues.yaml b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/clusterqueues.yaml index f51a6ae60..1ac0bdb6c 100644 --- a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/clusterqueues.yaml +++ b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/clusterqueues.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.12.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "clusterqueues.kueue.x-k8s.io" spec: group: "kueue.x-k8s.io" @@ -38,10 +38,10 @@ spec: description: "ClusterQueue is the Schema for the clusterQueue API." properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -54,42 +54,42 @@ spec: type: "string" type: "array" cohort: - description: "cohort that this ClusterQueue belongs to. CQs that belong to the same cohort can borrow unused resources from each other. \n A CQ can be a member of a single borrowing cohort. A workload submitted to a queue referencing this CQ can borrow quota from any CQ in the cohort. Only quota for the [resource, flavor] pairs listed in the CQ can be borrowed. If empty, this ClusterQueue cannot borrow from any other ClusterQueue and vice versa. \n A cohort is a name that links CQs together, but it doesn't reference any object. \n Validation of a cohort name is equivalent to that of object names: subdomain in DNS (RFC 1123)." + description: "cohort that this ClusterQueue belongs to. CQs that belong to the\nsame cohort can borrow unused resources from each other.\n\n\nA CQ can be a member of a single borrowing cohort. A workload submitted\nto a queue referencing this CQ can borrow quota from any CQ in the cohort.\nOnly quota for the [resource, flavor] pairs listed in the CQ can be\nborrowed.\nIf empty, this ClusterQueue cannot borrow from any other ClusterQueue and\nvice versa.\n\n\nA cohort is a name that links CQs together, but it doesn't reference any\nobject.\n\n\nValidation of a cohort name is equivalent to that of object names:\nsubdomain in DNS (RFC 1123)." type: "string" flavorFungibility: - description: "flavorFungibility defines whether a workload should try the next flavor before borrowing or preempting in the flavor being evaluated." + description: "flavorFungibility defines whether a workload should try the next flavor\nbefore borrowing or preempting in the flavor being evaluated." properties: whenCanBorrow: default: "Borrow" - description: "whenCanBorrow determines whether a workload should try the next flavor before borrowing in current flavor. The possible values are: \n - `Borrow` (default): allocate in current flavor if borrowing is possible. - `TryNextFlavor`: try next flavor even if the current flavor has enough resources to borrow." + description: "whenCanBorrow determines whether a workload should try the next flavor\nbefore borrowing in current flavor. The possible values are:\n\n\n- `Borrow` (default): allocate in current flavor if borrowing\n is possible.\n- `TryNextFlavor`: try next flavor even if the current\n flavor has enough resources to borrow." enum: - "Borrow" - "TryNextFlavor" type: "string" whenCanPreempt: default: "TryNextFlavor" - description: "whenCanPreempt determines whether a workload should try the next flavor before borrowing in current flavor. The possible values are: \n - `Preempt`: allocate in current flavor if it's possible to preempt some workloads. - `TryNextFlavor` (default): try next flavor even if there are enough candidates for preemption in the current flavor." + description: "whenCanPreempt determines whether a workload should try the next flavor\nbefore borrowing in current flavor. 
The possible values are:\n\n\n- `Preempt`: allocate in current flavor if it's possible to preempt some workloads.\n- `TryNextFlavor` (default): try next flavor even if there are enough\n candidates for preemption in the current flavor." enum: - "Preempt" - "TryNextFlavor" type: "string" type: "object" namespaceSelector: - description: "namespaceSelector defines which namespaces are allowed to submit workloads to this clusterQueue. Beyond this basic support for policy, an policy agent like Gatekeeper should be used to enforce more advanced policies. Defaults to null which is a nothing selector (no namespaces eligible). If set to an empty selector `{}`, then all namespaces are eligible." + description: "namespaceSelector defines which namespaces are allowed to submit workloads to\nthis clusterQueue. Beyond this basic support for policy, a policy agent like\nGatekeeper should be used to enforce more advanced policies.\nDefaults to null which is a nothing selector (no namespaces eligible).\nIf set to an empty selector `{}`, then all namespaces are eligible." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -101,23 +101,23 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" preemption: - description: "preemption describes policies to preempt Workloads from this ClusterQueue or the ClusterQueue's cohort. \n Preemption can happen in two scenarios: \n - When a Workload fits within the nominal quota of the ClusterQueue, but the quota is currently borrowed by other ClusterQueues in the cohort. Preempting Workloads in other ClusterQueues allows this ClusterQueue to reclaim its nominal quota. 
- When a Workload doesn't fit within the nominal quota of the ClusterQueue and there are admitted Workloads in the ClusterQueue with lower priority. \n The preemption algorithm tries to find a minimal set of Workloads to preempt to accomomdate the pending Workload, preempting Workloads with lower priority first." + description: "preemption describes policies to preempt Workloads from this ClusterQueue\nor the ClusterQueue's cohort.\n\n\nPreemption can happen in two scenarios:\n\n\n- When a Workload fits within the nominal quota of the ClusterQueue, but\n the quota is currently borrowed by other ClusterQueues in the cohort.\n Preempting Workloads in other ClusterQueues allows this ClusterQueue to\n reclaim its nominal quota.\n- When a Workload doesn't fit within the nominal quota of the ClusterQueue\n and there are admitted Workloads in the ClusterQueue with lower priority.\n\n\nThe preemption algorithm tries to find a minimal set of Workloads to\npreempt to accomomdate the pending Workload, preempting Workloads with\nlower priority first." properties: borrowWithinCohort: - description: "borrowWithinCohort provides configuration to allow preemption within cohort while borrowing." + description: "borrowWithinCohort provides configuration to allow preemption within\ncohort while borrowing." properties: maxPriorityThreshold: - description: "maxPriorityThreshold allows to restrict the set of workloads which might be preempted by a borrowing workload, to only workloads with priority less than or equal to the specified threshold priority. When the threshold is not specified, then any workload satisfying the policy can be preempted by the borrowing workload." + description: "maxPriorityThreshold allows to restrict the set of workloads which\nmight be preempted by a borrowing workload, to only workloads with\npriority less than or equal to the specified threshold priority.\nWhen the threshold is not specified, then any workload satisfying the\npolicy can be preempted by the borrowing workload." format: "int32" type: "integer" policy: default: "Never" - description: "policy determines the policy for preemption to reclaim quota within cohort while borrowing. Possible values are: - `Never` (default): do not allow for preemption, in other ClusterQueues within the cohort, for a borrowing workload. - `LowerPriority`: allow preemption, in other ClusterQueues within the cohort, for a borrowing workload, but only if the preempted workloads are of lower priority." + description: "policy determines the policy for preemption to reclaim quota within cohort while borrowing.\nPossible values are:\n- `Never` (default): do not allow for preemption, in other\n ClusterQueues within the cohort, for a borrowing workload.\n- `LowerPriority`: allow preemption, in other ClusterQueues\n within the cohort, for a borrowing workload, but only if\n the preempted workloads are of lower priority." enum: - "Never" - "LowerPriority" @@ -125,7 +125,7 @@ spec: type: "object" reclaimWithinCohort: default: "Never" - description: "reclaimWithinCohort determines whether a pending Workload can preempt Workloads from other ClusterQueues in the cohort that are using more than their nominal quota. The possible values are: \n - `Never` (default): do not preempt Workloads in the cohort. - `LowerPriority`: if the pending Workload fits within the nominal quota of its ClusterQueue, only preempt Workloads in the cohort that have lower priority than the pending Workload. 
- `Any`: if the pending Workload fits within the nominal quota of its ClusterQueue, preempt any Workload in the cohort, irrespective of priority." + description: "reclaimWithinCohort determines whether a pending Workload can preempt\nWorkloads from other ClusterQueues in the cohort that are using more than\ntheir nominal quota. The possible values are:\n\n\n- `Never` (default): do not preempt Workloads in the cohort.\n- `LowerPriority`: if the pending Workload fits within the nominal\n quota of its ClusterQueue, only preempt Workloads in the cohort that have\n lower priority than the pending Workload.\n- `Any`: if the pending Workload fits within the nominal quota of its\n ClusterQueue, preempt any Workload in the cohort, irrespective of\n priority." enum: - "Never" - "LowerPriority" @@ -133,7 +133,7 @@ spec: type: "string" withinClusterQueue: default: "Never" - description: "withinClusterQueue determines whether a pending Workload that doesn't fit within the nominal quota for its ClusterQueue, can preempt active Workloads in the ClusterQueue. The possible values are: \n - `Never` (default): do not preempt Workloads in the ClusterQueue. - `LowerPriority`: only preempt Workloads in the ClusterQueue that have lower priority than the pending Workload. - `LowerOrNewerEqualPriority`: only preempt Workloads in the ClusterQueue that either have a lower priority than the pending workload or equal priority and are newer than the pending workload." + description: "withinClusterQueue determines whether a pending Workload that doesn't fit\nwithin the nominal quota for its ClusterQueue, can preempt active Workloads in\nthe ClusterQueue. The possible values are:\n\n\n- `Never` (default): do not preempt Workloads in the ClusterQueue.\n- `LowerPriority`: only preempt Workloads in the ClusterQueue that have\n lower priority than the pending Workload.\n- `LowerOrNewerEqualPriority`: only preempt Workloads in the ClusterQueue that\n either have a lower priority than the pending workload or equal priority\n and are newer than the pending workload." enum: - "Never" - "LowerPriority" @@ -142,17 +142,17 @@ spec: type: "object" queueingStrategy: default: "BestEffortFIFO" - description: "QueueingStrategy indicates the queueing strategy of the workloads across the queues in this ClusterQueue. This field is immutable. Current Supported Strategies: \n - StrictFIFO: workloads are ordered strictly by creation time. Older workloads that can't be admitted will block admitting newer workloads even if they fit available quota. - BestEffortFIFO: workloads are ordered by creation time, however older workloads that can't be admitted will not block admitting newer workloads that fit existing quota." + description: "QueueingStrategy indicates the queueing strategy of the workloads\nacross the queues in this ClusterQueue. This field is immutable.\nCurrent Supported Strategies:\n\n\n- StrictFIFO: workloads are ordered strictly by creation time.\nOlder workloads that can't be admitted will block admitting newer\nworkloads even if they fit available quota.\n- BestEffortFIFO: workloads are ordered by creation time,\nhowever older workloads that can't be admitted will not block\nadmitting newer workloads that fit existing quota." enum: - "StrictFIFO" - "BestEffortFIFO" type: "string" resourceGroups: - description: "resourceGroups describes groups of resources. Each resource group defines the list of resources and a list of flavors that provide quotas for these resources. 
Each resource and each flavor can only form part of one resource group. resourceGroups can be up to 16." + description: "resourceGroups describes groups of resources.\nEach resource group defines the list of resources and a list of flavors\nthat provide quotas for these resources.\nEach resource and each flavor can only form part of one resource group.\nresourceGroups can be up to 16." items: properties: coveredResources: - description: "coveredResources is the list of resources covered by the flavors in this group. Examples: cpu, memory, vendor.com/gpu. The list cannot be empty and it can contain up to 16 resources." + description: "coveredResources is the list of resources covered by the flavors in this\ngroup.\nExamples: cpu, memory, vendor.com/gpu.\nThe list cannot be empty and it can contain up to 16 resources." items: description: "ResourceName is the name identifying various resources in a ResourceList." type: "string" @@ -160,21 +160,28 @@ spec: minItems: 1 type: "array" flavors: - description: "flavors is the list of flavors that provide the resources of this group. Typically, different flavors represent different hardware models (e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot cpus). Each flavor MUST list all the resources listed for this group in the same order as the .resources field. The list cannot be empty and it can contain up to 16 flavors." + description: "flavors is the list of flavors that provide the resources of this group.\nTypically, different flavors represent different hardware models\n(e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot\ncpus).\nEach flavor MUST list all the resources listed for this group in the same\norder as the .resources field.\nThe list cannot be empty and it can contain up to 16 flavors." items: properties: name: - description: "name of this flavor. The name should match the .metadata.name of a ResourceFlavor. If a matching ResourceFlavor does not exist, the ClusterQueue will have an Active condition set to False." + description: "name of this flavor. The name should match the .metadata.name of a\nResourceFlavor. If a matching ResourceFlavor does not exist, the\nClusterQueue will have an Active condition set to False." type: "string" resources: - description: "resources is the list of quotas for this flavor per resource. There could be up to 16 resources." + description: "resources is the list of quotas for this flavor per resource.\nThere could be up to 16 resources." items: properties: borrowingLimit: anyOf: - type: "integer" - type: "string" - description: "borrowingLimit is the maximum amount of quota for the [flavor, resource] combination that this ClusterQueue is allowed to borrow from the unused quota of other ClusterQueues in the same cohort. In total, at a given time, Workloads in a ClusterQueue can consume a quantity of quota equal to nominalQuota+borrowingLimit, assuming the other ClusterQueues in the cohort have enough unused quota. If null, it means that there is no borrowing limit. If not null, it must be non-negative. borrowingLimit must be null if spec.cohort is empty." 
+ description: "borrowingLimit is the maximum amount of quota for the [flavor, resource]\ncombination that this ClusterQueue is allowed to borrow from the unused\nquota of other ClusterQueues in the same cohort.\nIn total, at a given time, Workloads in a ClusterQueue can consume a\nquantity of quota equal to nominalQuota+borrowingLimit, assuming the other\nClusterQueues in the cohort have enough unused quota.\nIf null, it means that there is no borrowing limit.\nIf not null, it must be non-negative.\nborrowingLimit must be null if spec.cohort is empty." + pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" + x-kubernetes-int-or-string: true + lendingLimit: + anyOf: + - type: "integer" + - type: "string" + description: "lendingLimit is the maximum amount of unused quota for the [flavor, resource]\ncombination that this ClusterQueue can lend to other ClusterQueues in the same cohort.\nIn total, at a given time, ClusterQueue reserves for its exclusive use\na quantity of quota equals to nominalQuota - lendingLimit.\nIf null, it means that there is no lending limit, meaning that\nall the nominalQuota can be borrowed by other clusterQueues in the cohort.\nIf not null, it must be non-negative.\nlendingLimit must be null if spec.cohort is empty.\nThis field is in alpha stage. To be able to use this field,\nenable the feature gate LendingLimit, which is disabled by default." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true name: @@ -184,7 +191,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "nominalQuota is the quantity of this resource that is available for Workloads admitted by this ClusterQueue at a point in time. The nominalQuota must be non-negative. nominalQuota should represent the resources in the cluster available for running jobs (after discounting resources consumed by system components and pods not managed by kueue). In an autoscaled cluster, nominalQuota should account for resources that can be provided by a component such as Kubernetes cluster-autoscaler. \n If the ClusterQueue belongs to a cohort, the sum of the quotas for each (flavor, resource) combination defines the maximum quantity that can be allocated by a ClusterQueue in the cohort." + description: "nominalQuota is the quantity of this resource that is available for\nWorkloads admitted by this ClusterQueue at a point in time.\nThe nominalQuota must be non-negative.\nnominalQuota should represent the resources in the cluster available for\nrunning jobs (after discounting resources consumed by system components\nand pods not managed by kueue). In an autoscaled cluster, nominalQuota\nshould account for resources that can be provided by a component such as\nKubernetes cluster-autoscaler.\n\n\nIf the ClusterQueue belongs to a cohort, the sum of the quotas for each\n(flavor, resource) combination defines the maximum quantity that can be\nallocated by a ClusterQueue in the cohort." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true required: @@ -216,7 +223,7 @@ spec: x-kubernetes-list-type: "atomic" stopPolicy: default: "None" - description: "stopPolicy - if set to a value different than None, the ClusterQueue is considered Inactive, no new reservation being made. 
\n Depending on its value, its associated workloads will: \n - None - Workloads are admitted - HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation. - Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation." + description: "stopPolicy - if set to a value different from None, the ClusterQueue is considered Inactive, no new reservation being\nmade.\n\n\nDepending on its value, its associated workloads will:\n\n\n- None - Workloads are admitted\n- HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation.\n- Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation." enum: - "None" - "Hold" @@ -227,29 +234,29 @@ spec: description: "ClusterQueueStatus defines the observed state of ClusterQueue" properties: admittedWorkloads: - description: "admittedWorkloads is the number of workloads currently admitted to this clusterQueue and haven't finished yet." + description: "admittedWorkloads is the number of workloads currently admitted to this\nclusterQueue and haven't finished yet." format: "int32" type: "integer" conditions: - description: "conditions hold the latest available observations of the ClusterQueue current state." + description: "conditions hold the latest available observations of the ClusterQueue\ncurrent state." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." 
maxLength: 32768 type: "string" observedGeneration: - description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance." + description: "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance." format: "int64" minimum: 0.0 type: "integer" reason: - description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty." + description: "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -262,7 +269,7 @@ spec: - "Unknown" type: "string" type: - description: "type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" + description: "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" maxLength: 316 pattern: "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$" type: "string" @@ -278,7 +285,7 @@ spec: - "type" x-kubernetes-list-type: "map" flavorsReservation: - description: "flavorsReservation are the reserved quotas, by flavor, currently in use by the workloads assigned to this ClusterQueue." + description: "flavorsReservation are the reserved quotas, by flavor, currently in use by the\nworkloads assigned to this ClusterQueue." items: properties: name: @@ -292,7 +299,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Borrowed is quantity of quota that is borrowed from the cohort. In other words, it's the used quota that is over the nominalQuota." + description: "Borrowed is quantity of quota that is borrowed from the cohort. In other\nwords, it's the used quota that is over the nominalQuota." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true name: @@ -302,7 +309,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "total is the total quantity of used quota, including the amount borrowed from the cohort." 
+ description: "total is the total quantity of used quota, including the amount borrowed\nfrom the cohort." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true required: @@ -323,7 +330,7 @@ spec: - "name" x-kubernetes-list-type: "map" flavorsUsage: - description: "flavorsUsage are the used quotas, by flavor, currently in use by the workloads admitted in this ClusterQueue." + description: "flavorsUsage are the used quotas, by flavor, currently in use by the\nworkloads admitted in this ClusterQueue." items: properties: name: @@ -337,7 +344,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Borrowed is quantity of quota that is borrowed from the cohort. In other words, it's the used quota that is over the nominalQuota." + description: "Borrowed is quantity of quota that is borrowed from the cohort. In other\nwords, it's the used quota that is over the nominalQuota." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true name: @@ -347,7 +354,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "total is the total quantity of used quota, including the amount borrowed from the cohort." + description: "total is the total quantity of used quota, including the amount borrowed\nfrom the cohort." pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true required: @@ -368,16 +375,16 @@ spec: - "name" x-kubernetes-list-type: "map" pendingWorkloads: - description: "pendingWorkloads is the number of workloads currently waiting to be admitted to this clusterQueue." + description: "pendingWorkloads is the number of workloads currently waiting to be\nadmitted to this clusterQueue." format: "int32" type: "integer" pendingWorkloadsStatus: - description: "PendingWorkloadsStatus contains the information exposed about the current status of the pending workloads in the cluster queue." + description: "PendingWorkloadsStatus contains the information exposed about the current\nstatus of the pending workloads in the cluster queue." properties: clusterQueuePendingWorkload: description: "Head contains the list of top pending workloads." items: - description: "ClusterQueuePendingWorkload contains the information identifying a pending workload in the cluster queue." + description: "ClusterQueuePendingWorkload contains the information identifying a pending workload\nin the cluster queue." properties: name: description: "Name indicates the name of the pending workload." @@ -399,7 +406,7 @@ spec: - "lastChangeTime" type: "object" reservingWorkloads: - description: "reservingWorkloads is the number of workloads currently reserving quota in this clusterQueue." + description: "reservingWorkloads is the number of workloads currently reserving quota in this\nclusterQueue." 
format: "int32" type: "integer" type: "object" diff --git a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/localqueues.yaml b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/localqueues.yaml index 777dd426b..686e3693d 100644 --- a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/localqueues.yaml +++ b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/localqueues.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.12.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "localqueues.kueue.x-k8s.io" spec: group: "kueue.x-k8s.io" @@ -35,10 +35,10 @@ spec: description: "LocalQueue is the Schema for the localQueues API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -53,29 +53,29 @@ spec: description: "LocalQueueStatus defines the observed state of LocalQueue" properties: admittedWorkloads: - description: "admittedWorkloads is the number of workloads in this LocalQueue admitted to a ClusterQueue and that haven't finished yet." + description: "admittedWorkloads is the number of workloads in this LocalQueue\nadmitted to a ClusterQueue and that haven't finished yet." format: "int32" type: "integer" conditions: - description: "Conditions hold the latest available observations of the LocalQueue current state." + description: "Conditions hold the latest available observations of the LocalQueue\ncurrent state." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." maxLength: 32768 type: "string" observedGeneration: - description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance." + description: "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance." format: "int64" minimum: 0.0 type: "integer" reason: - description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty." + description: "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -88,7 +88,7 @@ spec: - "Unknown" type: "string" type: - description: "type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" + description: "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" maxLength: 316 pattern: "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$" type: "string" @@ -104,7 +104,7 @@ spec: - "type" x-kubernetes-list-type: "map" flavorUsage: - description: "flavorsUsage are the used quotas, by flavor currently in use by the workloads assigned to this LocalQueue." + description: "flavorsUsage are the used quotas, by flavor currently in use by the\nworkloads assigned to this LocalQueue." items: properties: name: @@ -142,7 +142,7 @@ spec: - "name" x-kubernetes-list-type: "map" flavorsReservation: - description: "flavorsReservation are the reserved quotas, by flavor currently in use by the workloads assigned to this LocalQueue." + description: "flavorsReservation are the reserved quotas, by flavor currently in use by the\nworkloads assigned to this LocalQueue." items: properties: name: @@ -184,7 +184,7 @@ spec: format: "int32" type: "integer" reservingWorkloads: - description: "reservingWorkloads is the number of workloads in this LocalQueue reserving quota in a ClusterQueue and that haven't finished yet." + description: "reservingWorkloads is the number of workloads in this LocalQueue\nreserving quota in a ClusterQueue and that haven't finished yet." format: "int32" type: "integer" type: "object" diff --git a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/resourceflavors.yaml b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/resourceflavors.yaml index c33de0085..36096a72d 100644 --- a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/resourceflavors.yaml +++ b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/resourceflavors.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.12.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "resourceflavors.kueue.x-k8s.io" spec: group: "kueue.x-k8s.io" @@ -22,10 +22,10 @@ spec: description: "ResourceFlavor is the Schema for the resourceflavors API." properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -35,23 +35,23 @@ spec: nodeLabels: additionalProperties: type: "string" - description: "nodeLabels are labels that associate the ResourceFlavor with Nodes that have the same labels. When a Workload is admitted, its podsets can only get assigned ResourceFlavors whose nodeLabels match the nodeSelector and nodeAffinity fields. Once a ResourceFlavor is assigned to a podSet, the ResourceFlavor's nodeLabels should be injected into the pods of the Workload by the controller that integrates with the Workload object. \n nodeLabels can be up to 8 elements." + description: "nodeLabels are labels that associate the ResourceFlavor with Nodes that\nhave the same labels.\nWhen a Workload is admitted, its podsets can only get assigned\nResourceFlavors whose nodeLabels match the nodeSelector and nodeAffinity\nfields.\nOnce a ResourceFlavor is assigned to a podSet, the ResourceFlavor's\nnodeLabels should be injected into the pods of the Workload by the\ncontroller that integrates with the Workload object.\n\n\nnodeLabels can be up to 8 elements." maxProperties: 8 type: "object" x-kubernetes-map-type: "atomic" nodeTaints: - description: "nodeTaints are taints that the nodes associated with this ResourceFlavor have. Workloads' podsets must have tolerations for these nodeTaints in order to get assigned this ResourceFlavor during admission. \n An example of a nodeTaint is cloud.provider.com/preemptible=\"true\":NoSchedule \n nodeTaints can be up to 8 elements." + description: "nodeTaints are taints that the nodes associated with this ResourceFlavor\nhave.\nWorkloads' podsets must have tolerations for these nodeTaints in order to\nget assigned this ResourceFlavor during admission.\n\n\nAn example of a nodeTaint is\ncloud.provider.com/preemptible=\"true\":NoSchedule\n\n\nnodeTaints can be up to 8 elements." items: - description: "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint." + description: "The node this Taint is attached to has the \"effect\" on\nany pod that does not tolerate the Taint." properties: effect: - description: "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute." + description: "Required. The effect of the taint on pods\nthat do not tolerate the taint.\nValid effects are NoSchedule, PreferNoSchedule and NoExecute." type: "string" key: description: "Required. The taint key to be applied to a node." type: "string" timeAdded: - description: "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints." + description: "TimeAdded represents the time at which the taint was added.\nIt is only written for NoExecute taints." format: "date-time" type: "string" value: @@ -65,25 +65,25 @@ spec: type: "array" x-kubernetes-list-type: "atomic" tolerations: - description: "tolerations are extra tolerations that will be added to the pods admitted in the quota associated with this resource flavor. 
\n An example of a toleration is cloud.provider.com/preemptible=\"true\":NoSchedule \n tolerations can be up to 8 elements." + description: "tolerations are extra tolerations that will be added to the pods admitted in\nthe quota associated with this resource flavor.\n\n\nAn example of a toleration is\ncloud.provider.com/preemptible=\"true\":NoSchedule\n\n\ntolerations can be up to 8 elements." items: - description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator ." + description: "The pod this Toleration is attached to tolerates any taint that matches\nthe triple using the matching operator ." properties: effect: - description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + description: "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." type: "string" key: - description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys." + description: "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." type: "string" operator: - description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category." + description: "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." type: "string" tolerationSeconds: - description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system." + description: "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." format: "int64" type: "integer" value: - description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string." + description: "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." 
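A companion sketch (not part of the upstream patch) of a ResourceFlavor using the nodeLabels, nodeTaints, and tolerations fields described in this file; the flavor name and the instance-type label key are assumptions, while the preemptible taint mirrors the example given in the field descriptions.

apiVersion: kueue.x-k8s.io/v1beta1
kind: ResourceFlavor
metadata:
  name: spot-arm64                          # hypothetical flavor name
spec:
  nodeLabels:                               # up to 8 entries; injected into admitted pods
    cloud.provider.com/instance-type: spot-arm64   # assumed label key
  nodeTaints:                               # workload podsets must tolerate these to be admitted
    - key: cloud.provider.com/preemptible
      value: "true"
      effect: NoSchedule
  tolerations:                              # extra tolerations added to admitted pods
    - key: cloud.provider.com/preemptible
      operator: Equal
      value: "true"
      effect: NoSchedule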
type: "string" type: "object" maxItems: 8 diff --git a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/workloads.yaml b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/workloads.yaml index 93bba5f85..9aea616c7 100644 --- a/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/workloads.yaml +++ b/crd-catalog/kubernetes-sigs/kueue/kueue.x-k8s.io/v1beta1/workloads.yaml @@ -2,7 +2,7 @@ apiVersion: "apiextensions.k8s.io/v1" kind: "CustomResourceDefinition" metadata: annotations: - controller-gen.kubebuilder.io/version: "v0.12.0" + controller-gen.kubebuilder.io/version: "v0.14.0" name: "workloads.kueue.x-k8s.io" spec: group: "kueue.x-k8s.io" @@ -34,10 +34,10 @@ spec: description: "Workload is the Schema for the workloads API" properties: apiVersion: - description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + description: "APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" type: "string" kind: - description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + description: "Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" type: "string" metadata: type: "object" @@ -46,10 +46,10 @@ spec: properties: active: default: true - description: "Active determines if a workload can be admitted into a queue. Changing active from true to false will evict any running workloads. Possible values are: \n - false: indicates that a workload should never be admitted and evicts running workloads - true: indicates that a workload can be evaluated for admission into it's respective queue. \n Defaults to true" + description: "Active determines if a workload can be admitted into a queue.\nChanging active from true to false will evict any running workloads.\nPossible values are:\n\n\n - false: indicates that a workload should never be admitted and evicts running workloads\n - true: indicates that a workload can be evaluated for admission into it's respective queue.\n\n\nDefaults to true" type: "boolean" podSets: - description: "podSets is a list of sets of homogeneous pods, each described by a Pod spec and a count. There must be at least one element and at most 8. podSets cannot be changed." + description: "podSets is a list of sets of homogeneous pods, each described by a Pod spec\nand a count.\nThere must be at least one element and at most 8.\npodSets cannot be changed." items: properties: count: @@ -58,17 +58,17 @@ spec: minimum: 1.0 type: "integer" minCount: - description: "minCount is the minimum number of pods for the spec acceptable if the workload supports partial admission. 
\n If not provided, partial admission for the current PodSet is not enabled. \n Only one podSet within the workload can use this. \n This is an alpha field and requires enabling PartialAdmission feature gate." + description: "minCount is the minimum number of pods for the spec acceptable\nif the workload supports partial admission.\n\n\nIf not provided, partial admission for the current PodSet is not\nenabled.\n\n\nOnly one podSet within the workload can use this.\n\n\nThis is an alpha field and requires enabling PartialAdmission feature gate." format: "int32" type: "integer" name: description: "name is the PodSet name." type: "string" template: - description: "template is the Pod template. \n The only allowed fields in template.metadata are labels and annotations. \n If requests are omitted for a container or initContainer, they default to the limits if they are explicitly specified for the container or initContainer. \n During admission, the rules in nodeSelector and nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match the keys in the nodeLabels from the ResourceFlavors considered for this Workload are used to filter the ResourceFlavors that can be assigned to this podSet." + description: "template is the Pod template.\n\n\nThe only allowed fields in template.metadata are labels and annotations.\n\n\nIf requests are omitted for a container or initContainer,\nthey default to the limits if they are explicitly specified for the\ncontainer or initContainer.\n\n\nDuring admission, the rules in nodeSelector and\nnodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match\nthe keys in the nodeLabels from the ResourceFlavors considered for this\nWorkload are used to filter the ResourceFlavors that can be assigned to\nthis podSet." properties: metadata: - description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + description: "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" properties: annotations: additionalProperties: @@ -88,10 +88,10 @@ spec: type: "string" type: "object" spec: - description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + description: "Specification of the desired behavior of the pod.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" properties: activeDeadlineSeconds: - description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer." + description: "Optional duration in seconds the pod may be active on the node relative to\nStartTime before the system will actively try to mark it failed and kill associated containers.\nValue must be a positive integer." format: "int64" type: "integer" affinity: @@ -101,9 +101,9 @@ spec: description: "Describes node affinity scheduling rules for the pod." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred." + description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred." items: - description: "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." + description: "An empty preferred scheduling term matches all objects with implicit weight 0\n(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." properties: preference: description: "A node selector term, associated with the corresponding weight." @@ -111,16 +111,16 @@ spec: matchExpressions: description: "A list of node selector requirements by node's labels." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -132,16 +132,16 @@ spec: matchFields: description: "A list of node selector requirements by node's fields." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." 
type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -162,26 +162,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node." + description: "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node." properties: nodeSelectorTerms: description: "Required. A list of node selector terms. The terms are ORed." items: - description: "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm." + description: "A null or empty node selector term matches no objects. The requirements of\nthem are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." properties: matchExpressions: description: "A list of node selector requirements by node's labels." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. 
If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -193,16 +193,16 @@ spec: matchFields: description: "A list of node selector requirements by node's fields." items: - description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." properties: key: description: "The label key that the selector applies to." type: "string" operator: - description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." + description: "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt." type: "string" values: - description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch." + description: "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch." items: type: "string" type: "array" @@ -223,7 +223,7 @@ spec: description: "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred." + description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." 
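The affinity schema embedded in the Workload pod template above follows the standard Kubernetes NodeAffinity/PodAffinity shape; a hedged illustration of how such a stanza could look under a podSet's template.spec is sketched below, with all keys and values chosen for illustration only.

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:                    # terms are ORed; requirements within a term are ANDed
        - matchExpressions:
            - key: kubernetes.io/arch
              operator: In                  # In, NotIn, Exists, DoesNotExist, Gt, Lt
              values: ["arm64"]
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 10                          # weights are summed per node; highest sum wins
        preference:
          matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values: ["zone-a"]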
items: description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" properties: @@ -231,21 +231,21 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -257,38 +257,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." 
+ description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -300,23 +300,23 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" type: "object" weight: - description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100." + description: "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." format: "int32" type: "integer" required: @@ -325,26 +325,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied." + description: "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." items: - description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" + description: "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key matches that of any node on which\na pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -356,38 +356,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -399,17 +399,17 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." 
+ description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" @@ -420,7 +420,7 @@ spec: description: "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." properties: preferredDuringSchedulingIgnoredDuringExecution: - description: "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred." + description: "The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred." items: description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" properties: @@ -428,21 +428,21 @@ spec: description: "Required. A pod affinity term, associated with the corresponding weight." properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -454,38 +454,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. 
The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -497,23 +497,23 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" type: "object" weight: - description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100." + description: "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." format: "int32" type: "integer" required: @@ -522,26 +522,26 @@ spec: type: "object" type: "array" requiredDuringSchedulingIgnoredDuringExecution: - description: "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied." + description: "If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied." 
items: - description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running" + description: "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key matches that of any node on which\na pod of the set of pods is running" properties: labelSelector: - description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + description: "A label query over a set of resources, in this case pods.\nIf it's null, this PodAffinityTerm matches with no Pods." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -553,38 +553,38 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nAlso, MatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" mismatchLabelKeys: - description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." + description: "MismatchLabelKeys is a set of pod label keys to select which pods will\nbe taken into consideration. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`\nto select the group of existing pods which pods will be taken into consideration\nfor the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming\npod labels will be ignored. The default value is empty.\nThe same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.\nAlso, MismatchLabelKeys cannot be set when LabelSelector isn't set.\nThis is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" namespaceSelector: - description: "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + description: "A label query over the set of namespaces that the term applies to.\nThe term is applied to the union of the namespaces selected by this field\nand the ones listed in the namespaces field.\nnull selector and null or empty namespaces list means \"this pod's namespace\".\nAn empty selector ({}) matches all namespaces." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." 
+ description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -596,17 +596,17 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" namespaces: - description: "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." + description: "namespaces specifies a static list of namespace names that the term applies to.\nThe term is applied to the union of the namespaces listed in this field\nand the ones selected by namespaceSelector.\nnull or empty namespaces list and null namespaceSelector means \"this pod's namespace\"." items: type: "string" type: "array" topologyKey: - description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed." + description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." type: "string" required: - "topologyKey" @@ -618,22 +618,22 @@ spec: description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted." type: "boolean" containers: - description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. 
Cannot be updated." + description: "List of containers belonging to the pod.\nContainers cannot currently be added or removed.\nThere must be at least one container in a Pod.\nCannot be updated." items: description: "A single application container that you want to run within a pod." properties: args: - description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -641,7 +641,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." 
type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -653,7 +653,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -663,7 +663,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -676,7 +676,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -702,7 +702,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -717,7 +717,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -725,7 +725,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -739,7 +739,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -749,22 +749,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets." + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: - description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated." 
+ description: "Actions that the management system should take in response to container lifecycle events.\nCannot be updated." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -773,7 +773,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -781,7 +781,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -798,10 +798,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -817,7 +817,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. 
There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -826,20 +826,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -848,7 +848,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." 
+ description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -856,7 +856,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -873,10 +873,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -892,7 +892,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -901,7 +901,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -909,19 +909,19 @@ spec: type: "object" type: "object" livenessProbe: - description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. 
The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -932,7 +932,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -941,7 +941,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -949,7 +949,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -966,24 +966,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." 
format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -996,45 +996,45 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated." + description: "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." type: "string" ports: - description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated." + description: "List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." items: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -1045,19 +1045,19 @@ spec: - "protocol" x-kubernetes-list-type: "map" readinessProbe: - description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. 
The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1068,7 +1068,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1077,7 +1077,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1085,7 +1085,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1102,24 +1102,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." 
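The ports list touched just above (containerPort, hostPort, name, protocol) is the standard ContainerPort shape. An illustrative sketch with invented names and numbers; as the description notes, omitting a port here does not prevent it from being exposed:

      ports:
        - name: http              # IANA_SVC_NAME, unique within the pod
          containerPort: 8080     # 0 < x < 65536
          protocol: TCP           # TCP is the default
        - name: metrics
          containerPort: 9090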
format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1132,17 +1132,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -1152,10 +1152,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." 
+ description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -1164,15 +1164,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -1188,7 +1188,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -1197,20 +1197,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. 
Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + description: "RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." type: "string" securityContext: - description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + description: "SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." 
+ description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -1226,27 +1226,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -1262,48 +1262,48 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" startupProbe: - description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1314,7 +1314,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1323,7 +1323,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1331,7 +1331,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." 
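The resources and securityContext blocks reflowed a few hunks above likewise follow the stock container fields. A combined sketch with made-up requests, limits, and hardening settings; the values are illustrative, not recommendations carried by this patch:

      resources:
        requests:
          cpu: 100m               # minimum guaranteed; must not exceed limits
          memory: 128Mi
        limits:
          cpu: 500m
          memory: 256Mi
      securityContext:
        runAsNonRoot: true        # kubelet refuses to start the container as UID 0
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        capabilities:
          drop: ["ALL"]           # drop the runtime's default capability set
        seccompProfile:
          type: RuntimeDefault    # use the container runtime's default profile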
type: "string" value: description: "The header field value" @@ -1348,24 +1348,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1378,34 +1378,34 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. 
Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." + description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -1424,27 +1424,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." 
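The volumeMounts entries above (mountPath, mountPropagation, readOnly, subPath, subPathExpr) combine as in this sketch; volume names and paths are invented, and subPathExpr assumes a POD_NAME environment variable is defined on the container:

      volumeMounts:
        - name: config               # must match a volume declared in the pod spec
          mountPath: /etc/app        # must not contain ':'
          readOnly: true
        - name: data
          mountPath: /var/lib/app
          subPathExpr: $(POD_NAME)   # expanded from the container's environment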
type: "string" required: - "mountPath" @@ -1452,22 +1452,22 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" dnsConfig: - description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy." + description: "Specifies the DNS parameters of a pod.\nParameters specified here will be merged to the generated DNS\nconfiguration based on DNSPolicy." properties: nameservers: - description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed." + description: "A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed." items: type: "string" type: "array" options: - description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy." + description: "A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy." items: description: "PodDNSConfigOption defines DNS resolver options of a pod." properties: @@ -1479,34 +1479,34 @@ spec: type: "object" type: "array" searches: - description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed." + description: "A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed." items: type: "string" type: "array" type: "object" dnsPolicy: - description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'." + description: "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'." type: "string" enableServiceLinks: - description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true." + description: "EnableServiceLinks indicates whether information about services should be injected into pod's\nenvironment variables, matching the syntax of Docker links.\nOptional: Defaults to true." 
type: "boolean" ephemeralContainers: - description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." + description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing\npod to perform user-initiated actions such as debugging. This list cannot be specified when\ncreating a pod, and it cannot be modified by updating the pod spec. In order to add an\nephemeral container to an existing pod, use the pod's ephemeralcontainers subresource." items: - description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. \n To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted." + description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for\nuser-initiated activities such as debugging. Ephemeral containers have no resource or\nscheduling guarantees, and they will not be restarted when they exit or when a Pod is\nremoved or restarted. The kubelet may evict a Pod if an ephemeral container causes the\nPod to exceed its resource allocation.\n\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing\nPod. Ephemeral containers may not be removed or restarted." properties: args: - description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -1514,7 +1514,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -1526,7 +1526,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -1536,7 +1536,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -1549,7 +1549,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -1575,7 +1575,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -1590,7 +1590,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -1598,7 +1598,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -1612,7 +1612,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -1622,22 +1622,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images" + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images" type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: description: "Lifecycle is not allowed for ephemeral containers." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -1646,7 +1646,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." 
type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1654,7 +1654,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1671,10 +1671,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -1690,7 +1690,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -1699,20 +1699,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). 
Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -1721,7 +1721,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1729,7 +1729,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1746,10 +1746,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -1765,7 +1765,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -1774,7 +1774,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." 
+ description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -1788,13 +1788,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1805,7 +1805,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1814,7 +1814,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1822,7 +1822,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1839,24 +1839,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." 
+ description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -1869,22 +1869,22 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers." + description: "Name of the ephemeral container specified as a DNS_LABEL.\nThis name must be unique among all containers, init containers and ephemeral containers." type: "string" ports: description: "Ports are not allowed for ephemeral containers." @@ -1892,22 +1892,22 @@ spec: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -1924,13 +1924,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." 
+ description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -1941,7 +1941,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -1950,7 +1950,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -1958,7 +1958,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -1975,24 +1975,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." 
format: "int32" type: "integer" tcpSocket: @@ -2005,17 +2005,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -2025,10 +2025,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." + description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -2037,15 +2037,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod." 
+ description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources\nalready allocated to the pod." properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -2061,7 +2061,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -2070,20 +2070,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers." + description: "Restart policy for the container to manage the restart behavior of each\ncontainer within a pod.\nThis may only be set for init containers. You cannot set this field on\nephemeral containers." type: "string" securityContext: - description: "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." 
+ description: "Optional: SecurityContext defines the security options the ephemeral container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." + description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -2099,27 +2099,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -2135,31 +2135,31 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. 
Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." 
+ description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" @@ -2170,13 +2170,13 @@ spec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2187,7 +2187,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2196,7 +2196,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2204,7 +2204,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2221,24 +2221,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2251,37 +2251,37 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" targetContainerName: - description: "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. \n The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined." + description: "If set, the name of the container from PodSpec that this ephemeral container targets.\nThe ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.\nIf not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\n\nThe container runtime must implement support for this feature. If the runtime does not\nsupport namespace targeting then the result of setting this field is undefined." type: "string" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." 
+ description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -2300,27 +2300,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." 
type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." type: "string" required: - "mountPath" @@ -2328,16 +2328,16 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" hostAliases: - description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods." + description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts\nfile if specified. This is only valid for non-hostNetwork pods." items: - description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file." + description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file." properties: hostnames: description: "Hostnames for the above IP address." @@ -2350,48 +2350,48 @@ spec: type: "object" type: "array" hostIPC: - description: "Use the host's ipc namespace. Optional: Default to false." + description: "Use the host's ipc namespace.\nOptional: Default to false." type: "boolean" hostNetwork: - description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false." + description: "Host networking requested for this pod. Use the host's network namespace.\nIf this option is set, the ports that will be used must be specified.\nDefault to false." type: "boolean" hostPID: - description: "Use the host's pid namespace. Optional: Default to false." + description: "Use the host's pid namespace.\nOptional: Default to false." type: "boolean" hostUsers: - description: "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature." 
+ description: "Use the host's user namespace.\nOptional: Default to true.\nIf set to true or not present, the pod will be run in the host user namespace, useful\nfor when the pod needs a feature only available to the host user namespace, such as\nloading a kernel module with CAP_SYS_MODULE.\nWhen set to false, a new userns is created for the pod. Setting false is useful for\nmitigating container breakout vulnerabilities even allowing users to run their\ncontainers as root without actually having root privileges on the host.\nThis field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature." type: "boolean" hostname: - description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value." + description: "Specifies the hostname of the Pod\nIf not specified, the pod's hostname will be set to a system-defined value." type: "string" imagePullSecrets: - description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.\nIf specified, these secrets will be passed to individual puller implementations for them to use.\nMore info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" items: - description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace." + description: "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" type: "array" initContainers: - description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" + description: "List of initialization containers belonging to the pod.\nInit containers are executed in order prior to containers being started. 
If any\ninit container fails, the pod is considered to have failed and is handled according\nto its restartPolicy. The name for an init container or normal container must be\nunique among all containers.\nInit containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.\nThe resourceRequirements of an init container are taken into account during scheduling\nby finding the highest request/limit for each resource type, and then using the max of\nof that value or the sum of the normal containers. Limits are applied to init containers\nin a similar fashion.\nInit containers cannot currently be added or removed.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/" items: description: "A single application container that you want to run within a pod." properties: args: - description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Arguments to the entrypoint.\nThe container image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" command: - description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" + description: "Entrypoint array. Not executed within a shell.\nThe container image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will\nproduce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless\nof whether the variable exists or not. 
Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell" items: type: "string" type: "array" env: - description: "List of environment variables to set in the container. Cannot be updated." + description: "List of environment variables to set in the container.\nCannot be updated." items: description: "EnvVar represents an environment variable present in a Container." properties: @@ -2399,7 +2399,7 @@ spec: description: "Name of the environment variable. Must be a C_IDENTIFIER." type: "string" value: - description: "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\"." + description: "Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\"." type: "string" valueFrom: description: "Source for the environment variable's value. Cannot be used if value is not empty." @@ -2411,7 +2411,7 @@ spec: description: "The key to select." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap or its key must be defined" @@ -2421,7 +2421,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" fieldRef: - description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + description: "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." properties: apiVersion: description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\"." @@ -2434,7 +2434,7 @@ spec: type: "object" x-kubernetes-map-type: "atomic" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." 
properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -2460,7 +2460,7 @@ spec: description: "The key of the secret to select from. Must be a valid secret key." type: "string" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret or its key must be defined" @@ -2475,7 +2475,7 @@ spec: type: "object" type: "array" envFrom: - description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated." + description: "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated." items: description: "EnvFromSource represents the source of a set of ConfigMaps" properties: @@ -2483,7 +2483,7 @@ spec: description: "The ConfigMap to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the ConfigMap must be defined" @@ -2497,7 +2497,7 @@ spec: description: "The Secret to select from" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "Specify whether the Secret must be defined" @@ -2507,22 +2507,22 @@ spec: type: "object" type: "array" image: - description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets." + description: "Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets." type: "string" imagePullPolicy: - description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images" + description: "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images" type: "string" lifecycle: - description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated." + description: "Actions that the management system should take in response to container lifecycle events.\nCannot be updated." properties: postStart: - description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -2531,7 +2531,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2539,7 +2539,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2556,10 +2556,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." 
+ description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -2575,7 +2575,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -2584,20 +2584,20 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" type: "object" preStop: - description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + description: "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." 
+ description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" @@ -2606,7 +2606,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2614,7 +2614,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2631,10 +2631,10 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" @@ -2650,7 +2650,7 @@ spec: - "seconds" type: "object" tcpSocket: - description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + description: "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified." properties: host: description: "Optional: Host name to connect to, defaults to the pod IP." @@ -2659,7 +2659,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" @@ -2667,19 +2667,19 @@ spec: type: "object" type: "object" livenessProbe: - description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2690,7 +2690,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2699,7 +2699,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2707,7 +2707,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2724,24 +2724,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2754,45 +2754,45 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" name: - description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated." + description: "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." type: "string" ports: - description: "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated." + description: "List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated." items: description: "ContainerPort represents a network port in a single container." properties: containerPort: - description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536." + description: "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536." format: "int32" type: "integer" hostIP: description: "What host IP to bind the external port to." type: "string" hostPort: - description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this." + description: "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this." format: "int32" type: "integer" name: - description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services." + description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services." type: "string" protocol: default: "TCP" - description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\"." + description: "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\"." type: "string" required: - "containerPort" @@ -2803,19 +2803,19 @@ spec: - "protocol" x-kubernetes-list-type: "map" readinessProbe: - description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -2826,7 +2826,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -2835,7 +2835,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -2843,7 +2843,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -2860,24 +2860,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -2890,17 +2890,17 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. 
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" @@ -2910,10 +2910,10 @@ spec: description: "ContainerResizePolicy represents resource resize policy for the container." properties: resourceName: - description: "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory." + description: "Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory." type: "string" restartPolicy: - description: "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired." + description: "Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired." type: "string" required: - "resourceName" @@ -2922,15 +2922,15 @@ spec: type: "array" x-kubernetes-list-type: "atomic" resources: - description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + description: "Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers." items: description: "ResourceClaim references one entry in PodSpec.ResourceClaims." properties: name: - description: "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container." + description: "Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container." type: "string" required: - "name" @@ -2946,7 +2946,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -2955,20 +2955,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" restartPolicy: - description: "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed." + description: "RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is \"Always\".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod's restart policy and the container type.\nSetting the RestartPolicy as \"Always\" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy \"Always\"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a \"sidecar\" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted." type: "string" securityContext: - description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + description: "SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" properties: allowPrivilegeEscalation: - description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows." + description: "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" capabilities: - description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." + description: "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows." properties: add: description: "Added capabilities" @@ -2984,27 +2984,27 @@ spec: type: "array" type: "object" privileged: - description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows." + description: "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" procMount: - description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows." + description: "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows." type: "string" readOnlyRootFilesystem: - description: "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows." + description: "Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows." type: "boolean" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -3020,48 +3020,48 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." 
+ description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" startupProbe: - description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" properties: exec: description: "Exec specifies the action to take." properties: command: - description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + description: "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy." items: type: "string" type: "array" type: "object" failureThreshold: - description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1." format: "int32" type: "integer" grpc: @@ -3072,7 +3072,7 @@ spec: format: "int32" type: "integer" service: - description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + description: "Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC." type: "string" required: - "port" @@ -3081,7 +3081,7 @@ spec: description: "HTTPGet specifies the http request to perform." properties: host: - description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + description: "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead." 
type: "string" httpHeaders: description: "Custom headers to set in the request. HTTP allows repeated headers." @@ -3089,7 +3089,7 @@ spec: description: "HTTPHeader describes a custom header to be used in HTTP probes" properties: name: - description: "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header." + description: "The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header." type: "string" value: description: "The header field value" @@ -3106,24 +3106,24 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true scheme: - description: "Scheme to use for connecting to the host. Defaults to HTTP." + description: "Scheme to use for connecting to the host.\nDefaults to HTTP." type: "string" required: - "port" type: "object" initialDelaySeconds: - description: "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" periodSeconds: - description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + description: "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1." format: "int32" type: "integer" successThreshold: - description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + description: "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1." format: "int32" type: "integer" tcpSocket: @@ -3136,34 +3136,34 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + description: "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." x-kubernetes-int-or-string: true required: - "port" type: "object" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset." + description: "Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset." format: "int64" type: "integer" timeoutSeconds: - description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + description: "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" format: "int32" type: "integer" type: "object" stdin: - description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false." + description: "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false." type: "boolean" stdinOnce: - description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false" + description: "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false" type: "boolean" terminationMessagePath: - description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated." 
+ description: "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated." type: "string" terminationMessagePolicy: - description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated." + description: "Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated." type: "string" tty: - description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false." + description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false." type: "boolean" volumeDevices: description: "volumeDevices is the list of block devices to be used by the container." @@ -3182,27 +3182,27 @@ spec: type: "object" type: "array" volumeMounts: - description: "Pod volumes to mount into the container's filesystem. Cannot be updated." + description: "Pod volumes to mount into the container's filesystem.\nCannot be updated." items: description: "VolumeMount describes a mounting of a Volume within a container." properties: mountPath: - description: "Path within the container at which the volume should be mounted. Must not contain ':'." + description: "Path within the container at which the volume should be mounted. Must\nnot contain ':'." type: "string" mountPropagation: - description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10." + description: "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10." type: "string" name: description: "This must match the Name of a Volume." type: "string" readOnly: - description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false." + description: "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false." type: "boolean" subPath: - description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root)." + description: "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root)." type: "string" subPathExpr: - description: "Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive." + description: "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive." type: "string" required: - "mountPath" @@ -3210,26 +3210,26 @@ spec: type: "object" type: "array" workingDir: - description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated." + description: "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated." type: "string" required: - "name" type: "object" type: "array" nodeName: - description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements." + description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty,\nthe scheduler simply schedules this pod onto that node, assuming that it fits resource\nrequirements." type: "string" nodeSelector: additionalProperties: type: "string" - description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + description: "NodeSelector is a selector which must be true for the pod to fit on a node.\nSelector which must match a node's labels for the pod to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" type: "object" x-kubernetes-map-type: "atomic" os: - description: "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
\n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + description: "Specifies the OS of the containers in the pod.\nSome pod and container fields are restricted if this is set.\n\n\nIf the OS field is set to linux, the following fields must be unset:\n-securityContext.windowsOptions\n\n\nIf the OS field is set to windows, following fields must be unset:\n- spec.hostPID\n- spec.hostIPC\n- spec.hostUsers\n- spec.securityContext.seLinuxOptions\n- spec.securityContext.seccompProfile\n- spec.securityContext.fsGroup\n- spec.securityContext.fsGroupChangePolicy\n- spec.securityContext.sysctls\n- spec.shareProcessNamespace\n- spec.securityContext.runAsUser\n- spec.securityContext.runAsGroup\n- spec.securityContext.supplementalGroups\n- spec.containers[*].securityContext.seLinuxOptions\n- spec.containers[*].securityContext.seccompProfile\n- spec.containers[*].securityContext.capabilities\n- spec.containers[*].securityContext.readOnlyRootFilesystem\n- spec.containers[*].securityContext.privileged\n- spec.containers[*].securityContext.allowPrivilegeEscalation\n- spec.containers[*].securityContext.procMount\n- spec.containers[*].securityContext.runAsUser\n- spec.containers[*].securityContext.runAsGroup" properties: name: - description: "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null" + description: "Name is the name of the operating system. The currently supported values are linux and windows.\nAdditional value may be defined in future and can be one of:\nhttps://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration\nClients should expect to handle additional values and treat unrecognized values in this field as os: null" type: "string" required: - "name" @@ -3241,20 +3241,20 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. 
The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" + description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.\nThis field will be autopopulated at admission time by the RuntimeClass admission controller. If\nthe RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.\nThe RuntimeClass admission controller will reject Pod create requests which have the overhead already\nset. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value\ndefined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md" type: "object" preemptionPolicy: - description: "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset." + description: "PreemptionPolicy is the Policy for preempting pods with lower priority.\nOne of Never, PreemptLowerPriority.\nDefaults to PreemptLowerPriority if unset." type: "string" priority: - description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority." + description: "The priority value. Various system components use this field to find the\npriority of the pod. When Priority Admission Controller is enabled, it\nprevents users from setting this field. The admission controller populates\nthis field from PriorityClassName.\nThe higher the value, the higher the priority." format: "int32" type: "integer" priorityClassName: - description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default." + description: "If specified, indicates the pod's priority. \"system-node-critical\" and\n\"system-cluster-critical\" are two special keywords which indicate the\nhighest priorities with the former being the highest priority. Any other\nname must be defined by creating a PriorityClass object with that name.\nIf not specified, the pod priority will be default or zero if there is no\ndefault." type: "string" readinessGates: - description: "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" + description: "If specified, all readiness gates will be evaluated for pod readiness.\nA pod is ready when all its containers are ready AND\nall conditions specified in the readiness gates have status equal to \"True\"\nMore info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates" items: description: "PodReadinessGate contains the reference to a pod condition" properties: @@ -3266,21 +3266,21 @@ spec: type: "object" type: "array" resourceClaims: - description: "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable." + description: "ResourceClaims defines which ResourceClaims must be allocated\nand reserved before the Pod is allowed to start. The resources\nwill be made available to those containers which consume them\nby name.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable." items: - description: "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name." + description: "PodResourceClaim references exactly one ResourceClaim through a ClaimSource.\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod.\nContainers that need access to the ResourceClaim reference it with this name." properties: name: - description: "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL." + description: "Name uniquely identifies this resource claim inside the pod.\nThis must be a DNS_LABEL." type: "string" source: description: "Source describes where to find the ResourceClaim." properties: resourceClaimName: - description: "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod." + description: "ResourceClaimName is the name of a ResourceClaim object in the same\nnamespace as this pod." type: "string" resourceClaimTemplateName: - description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. \n The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. \n This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim." + description: "ResourceClaimTemplateName is the name of a ResourceClaimTemplate\nobject in the same namespace as this pod.\n\n\nThe template will be used to create a new ResourceClaim, which will\nbe bound to this pod. When this pod is deleted, the ResourceClaim\nwill also be deleted. 
The pod name and resource name, along with a\ngenerated component, will be used to form a unique name for the\nResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\n\nThis field is immutable and no changes will be made to the\ncorresponding ResourceClaim by the control plane after creating the\nResourceClaim." type: "string" type: "object" required: @@ -3291,21 +3291,21 @@ spec: - "name" x-kubernetes-list-type: "map" restartPolicy: - description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy" + description: "Restart policy for all containers within the pod.\nOne of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.\nDefault to Always.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy" type: "string" runtimeClassName: - description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class" + description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used\nto run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.\nIf unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an\nempty definition that uses the default runtime handler.\nMore info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class" type: "string" schedulerName: - description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler." + description: "If specified, the pod will be dispatched by specified scheduler.\nIf not specified, the pod will be dispatched by default scheduler." type: "string" schedulingGates: - description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. \n SchedulingGates can only be set at pod creation time, and be removed only afterwards. \n This is a beta feature enabled by the PodSchedulingReadiness feature gate." + description: "SchedulingGates is an opaque list of values that if specified will block scheduling the pod.\nIf schedulingGates is not empty, the pod will stay in the SchedulingGated state and the\nscheduler will not attempt to schedule the pod.\n\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate." items: description: "PodSchedulingGate is associated to a Pod to guard its scheduling." properties: name: - description: "Name of the scheduling gate. Each scheduling gate must have a unique name field." + description: "Name of the scheduling gate.\nEach scheduling gate must have a unique name field." 
type: "string" required: - "name" @@ -3315,28 +3315,28 @@ spec: - "name" x-kubernetes-list-type: "map" securityContext: - description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + description: "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field." properties: fsGroup: - description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + description: "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:\n\n\n1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" fsGroupChangePolicy: - description: "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows." + description: "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume\nbefore being exposed inside Pod. This field will only apply to\nvolume types which support fsGroup based ownership(and permissions).\nIt will have no effect on ephemeral volume types such as: secret, configmaps\nand emptydir.\nValid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used.\nNote that this field cannot be set when spec.os.name is windows." type: "string" runAsGroup: - description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" runAsNonRoot: - description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. 
If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "boolean" runAsUser: - description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\nNote that this field cannot be set when spec.os.name is windows." format: "int64" type: "integer" seLinuxOptions: - description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + description: "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\nNote that this field cannot be set when spec.os.name is windows." properties: level: description: "Level is SELinux level label that applies to the container." @@ -3352,25 +3352,25 @@ spec: type: "string" type: "object" seccompProfile: - description: "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." + description: "The seccomp options to use by the containers in this pod.\nNote that this field cannot be set when spec.os.name is windows." properties: localhostProfile: - description: "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type." + description: "localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet's configured seccomp profile location.\nMust be set if type is \"Localhost\". Must NOT be set for any other type." type: "string" type: - description: "type indicates which kind of seccomp profile will be applied. 
Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + description: "type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied." type: "string" required: - "type" type: "object" supplementalGroups: - description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows." + description: "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID, the fsGroup (if specified), and group memberships\ndefined in the container image for the uid of the container process. If unspecified,\nno additional groups are added to any container. Note that group memberships\ndefined in the container image for the uid of the container process are still effective,\neven if they are not included in this list.\nNote that this field cannot be set when spec.os.name is windows." items: format: "int64" type: "integer" type: "array" sysctls: - description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows." + description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\nNote that this field cannot be set when spec.os.name is windows." items: description: "Sysctl defines a kernel parameter to be set" properties: @@ -3386,85 +3386,85 @@ spec: type: "object" type: "array" windowsOptions: - description: "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + description: "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux." properties: gmsaCredentialSpec: - description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field." + description: "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field." type: "string" gmsaCredentialSpecName: description: "GMSACredentialSpecName is the name of the GMSA credential spec to use." 
type: "string" hostProcess: - description: "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true." + description: "HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true." type: "boolean" runAsUserName: - description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence." + description: "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence." type: "string" type: "object" type: "object" serviceAccount: - description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead." + description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName.\nDeprecated: Use serviceAccountName instead." type: "string" serviceAccountName: - description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" + description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/" type: "string" setHostnameAsFQDN: - description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false." + description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).\nIn Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).\nIn Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN.\nIf a pod does not have FQDN, this has no effect.\nDefault to false." type: "boolean" shareProcessNamespace: - description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false." 
+ description: "Share a single process namespace between all of the containers in a pod.\nWhen this is set containers will be able to view and signal processes from other containers\nin the same pod, and the first process in each container will not be assigned PID 1.\nHostPID and ShareProcessNamespace cannot both be set.\nOptional: Default to false." type: "boolean" subdomain: - description: "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all." + description: "If specified, the fully qualified Pod hostname will be \"...svc.\".\nIf not specified, the pod will not have a domainname at all." type: "string" terminationGracePeriodSeconds: - description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds." + description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nIf this value is nil, the default grace period will be used instead.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nDefaults to 30 seconds." format: "int64" type: "integer" tolerations: description: "If specified, the pod's tolerations." items: - description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator ." + description: "The pod this Toleration is attached to tolerates any taint that matches\nthe triple using the matching operator ." properties: effect: - description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + description: "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." type: "string" key: - description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys." + description: "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." type: "string" operator: - description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category." + description: "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. 
Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." type: "string" tolerationSeconds: - description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system." + description: "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." format: "int64" type: "integer" value: - description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string." + description: "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." type: "string" type: "object" type: "array" topologySpreadConstraints: - description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed." + description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology\ndomains. Scheduler will schedule pods in a way which abides by the constraints.\nAll topologySpreadConstraints are ANDed." items: description: "TopologySpreadConstraint specifies how to spread matching pods among the given topology." properties: labelSelector: - description: "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain." + description: "LabelSelector is used to find matching pods.\nPods that match this label selector are counted to determine the number of pods\nin their corresponding topology domain." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
items: type: "string" type: "array" @@ -3476,35 +3476,35 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" matchLabelKeys: - description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + description: "MatchLabelKeys is a set of pod label keys to select the pods over which\nspreading will be calculated. The keys are used to lookup values from the\nincoming pod labels, those key-value labels are ANDed with labelSelector\nto select the group of existing pods over which spreading will be calculated\nfor the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.\nMatchLabelKeys cannot be set when LabelSelector isn't set.\nKeys that don't exist in the incoming pod labels will\nbe ignored. A null or empty list means only match against labelSelector.\n\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." items: type: "string" type: "array" x-kubernetes-list-type: "atomic" maxSkew: - description: "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed." 
+ description: "MaxSkew describes the degree to which pods may be unevenly distributed.\nWhen `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference\nbetween the number of matching pods in the target topology and the global minimum.\nThe global minimum is the minimum number of matching pods in an eligible domain\nor zero if the number of eligible domains is less than MinDomains.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 2/2/1:\nIn this case, the global minimum is 1.\n| zone1 | zone2 | zone3 |\n| P P | P P | P |\n- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;\nscheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)\nviolate MaxSkew(1).\n- if MaxSkew is 2, incoming pod can be scheduled onto any zone.\nWhen `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence\nto topologies that satisfy it.\nIt's a required field. Default value is 1 and 0 is not allowed." format: "int32" type: "integer" minDomains: - description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." 
+ description: "MinDomains indicates a minimum number of eligible domains.\nWhen the number of eligible domains with matching topology keys is less than minDomains,\nPod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed.\nAnd when the number of eligible domains with matching topology keys equals or greater than minDomains,\nthis value has no effect on scheduling.\nAs a result, when the number of eligible domains is less than minDomains,\nscheduler won't schedule more than maxSkew Pods to those domains.\nIf value is nil, the constraint behaves as if MinDomains is equal to 1.\nValid values are integers greater than 0.\nWhen value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same\nlabelSelector spread as 2/2/2:\n| zone1 | zone2 | zone3 |\n| P P | P P | P P |\nThe number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0.\nIn this situation, new pod with the same labelSelector cannot be scheduled,\nbecause computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,\nit will violate MaxSkew.\n\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." format: "int32" type: "integer" nodeAffinityPolicy: - description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector\nwhen calculating pod topology spread skew. Options are:\n- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.\n- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\n\nIf this value is nil, the behavior is equivalent to the Honor policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: "string" nodeTaintsPolicy: - description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating\npod topology spread skew. Options are:\n- Honor: nodes without taints, along with tainted nodes for which the incoming pod\nhas a toleration, are included.\n- Ignore: node taints are ignored. All nodes are included.\n\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.\nThis is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." type: "string" topologyKey: - description: "TopologyKey is the key of node labels. 
Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field." + description: "TopologyKey is the key of node labels. Nodes that have a label with this key\nand identical values are considered to be in the same topology.\nWe consider each as a \"bucket\", and try to put balanced number\nof pods into each bucket.\nWe define a domain as a particular instance of a topology.\nAlso, we define an eligible domain as a domain whose nodes meet the requirements of\nnodeAffinityPolicy and nodeTaintsPolicy.\ne.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology.\nAnd, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology.\nIt's a required field." type: "string" whenUnsatisfiable: - description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field." + description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy\nthe spread constraint.\n- DoNotSchedule (default) tells the scheduler not to schedule it.\n- ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod\nif and only if every possible node assignment for that pod would violate\n\"MaxSkew\" on some topology.\nFor example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same\nlabelSelector spread as 3/1/1:\n| zone1 | zone2 | zone3 |\n| P P P | P | P |\nIf WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled\nto zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies\nMaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler\nwon't make it *more* imbalanced.\nIt's a required field." type: "string" required: - "maxSkew" @@ -3517,25 +3517,25 @@ spec: - "whenUnsatisfiable" x-kubernetes-list-type: "map" volumes: - description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes" + description: "List of volumes that can be mounted by containers belonging to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes" items: description: "Volume represents a named volume in a pod that may be accessed by any container in the pod." properties: awsElasticBlockStore: - description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" properties: fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" partition: - description: "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." + description: "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty)." format: "int32" type: "integer" readOnly: - description: "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" type: "boolean" volumeID: - description: "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + description: "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" type: "string" required: - "volumeID" @@ -3553,13 +3553,13 @@ spec: description: "diskURI is the URI of data disk in the blob storage" type: "string" fsType: - description: "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
+ description: "fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" kind: description: "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" type: "string" readOnly: - description: "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" required: - "diskName" @@ -3569,7 +3569,7 @@ spec: description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod." properties: readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretName: description: "secretName is the name of secret that contains Azure Storage Account Name and Key" @@ -3585,7 +3585,7 @@ spec: description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" properties: monitors: - description: "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" items: type: "string" type: "array" @@ -3593,44 +3593,44 @@ spec: description: "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /" type: "string" readOnly: - description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "boolean" secretFile: - description: "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "string" secretRef: - description: "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. 
apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" user: - description: "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + description: "user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" type: "string" required: - "monitors" type: "object" cinder: - description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "boolean" secretRef: - description: "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack." + description: "secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" volumeID: - description: "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + description: "volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" type: "string" required: - "volumeID" @@ -3639,11 +3639,11 @@ spec: description: "configMap represents a configMap that should populate this volume" properties: defaultMode: - description: "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." 
+ description: "defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: - description: "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -3651,11 +3651,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -3663,7 +3663,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" 
type: "string" optional: description: "optional specify whether the ConfigMap or its keys must be defined" @@ -3674,26 +3674,26 @@ spec: description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." properties: driver: - description: "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster." + description: "driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." type: "string" fsType: - description: "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply." + description: "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply." type: "string" nodePublishSecretRef: - description: "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." + description: "nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" readOnly: - description: "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write)." + description: "readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write)." type: "boolean" volumeAttributes: additionalProperties: type: "string" - description: "volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values." + description: "volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values." type: "object" required: - "driver" @@ -3702,7 +3702,7 @@ spec: description: "downwardAPI represents downward API about the pod that should populate this volume" properties: defaultMode: - description: "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits to use on created files by default. Must be a\nOptional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: @@ -3724,14 +3724,14 @@ spec: type: "object" x-kubernetes-map-type: "atomic" mode: - description: "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" type: "string" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -3756,27 +3756,27 @@ spec: type: "array" type: "object" emptyDir: - description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" properties: medium: - description: "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "medium represents what type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" type: "string" sizeLimit: anyOf: - type: "integer" - type: "string" - description: "sizeLimit is the total amount of local storage required for this EmptyDir volume. 
The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + description: "sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true type: "object" ephemeral: - description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + description: "ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time." properties: volumeClaimTemplate: - description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. 
Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. \n Required, must not be nil." + description: "Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `-` where\n`` is the name from the `PodSpec.Volumes` array\nentry. Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil." properties: metadata: - description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." + description: "May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation." properties: annotations: additionalProperties: @@ -3796,18 +3796,18 @@ spec: type: "string" type: "object" spec: - description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here." + description: "The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here." properties: accessModes: - description: "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" + description: "accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1" items: type: "string" type: "array" dataSource: - description: "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource." + description: "dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource." properties: apiGroup: - description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required." + description: "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." type: "string" kind: description: "Kind is the type of resource being referenced" @@ -3821,10 +3821,10 @@ spec: type: "object" x-kubernetes-map-type: "atomic" dataSourceRef: - description: "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + description: "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. 
This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." properties: apiGroup: - description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required." + description: "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required." type: "string" kind: description: "Kind is the type of resource being referenced" @@ -3833,14 +3833,14 @@ spec: description: "Name is the name of resource being referenced" type: "string" namespace: - description: "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + description: "Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled." type: "string" required: - "kind" - "name" type: "object" resources: - description: "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + description: "resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" properties: limits: additionalProperties: @@ -3849,7 +3849,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" requests: additionalProperties: @@ -3858,7 +3858,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + description: "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" type: "object" type: "object" selector: @@ -3867,16 +3867,16 @@ spec: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." 
items: type: "string" type: "array" @@ -3888,18 +3888,18 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" storageClassName: - description: "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" + description: "storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1" type: "string" volumeAttributesClassName: - description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." + description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.\nIf specified, the CSI driver will create or update the volume with the attributes defined\nin the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,\nit can be changed after the claim is created. An empty string value means that no VolumeAttributesClass\nwill be applied to the claim but it's not allowed to reset this field to empty string once it is set.\nIf unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass\nwill be set by the persistentvolume controller if it exists.\nIf the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be\nset to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource\nexists.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass\n(Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled." type: "string" volumeMode: - description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec." 
+ description: "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec." type: "string" volumeName: description: "volumeName is the binding reference to the PersistentVolume backing this claim." @@ -3913,14 +3913,14 @@ spec: description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" lun: description: "lun is Optional: FC target lun number" format: "int32" type: "integer" readOnly: - description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" targetWWNs: description: "targetWWNs is Optional: FC target worldwide names (WWNs)" @@ -3928,19 +3928,19 @@ spec: type: "string" type: "array" wwids: - description: "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously." + description: "wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously." items: type: "string" type: "array" type: "object" flexVolume: - description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." + description: "flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." properties: driver: description: "driver is the name of the driver to use for this volume." type: "string" fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script." type: "string" options: additionalProperties: @@ -3948,13 +3948,13 @@ spec: description: "options is Optional: this field holds extra command options if any." type: "object" readOnly: - description: "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." 
+ description: "secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" @@ -3965,36 +3965,36 @@ spec: description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" properties: datasetName: - description: "datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated" + description: "datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated" type: "string" datasetUUID: description: "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset" type: "string" type: "object" gcePersistentDisk: - description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" properties: fsType: - description: "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" partition: - description: "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" format: "int32" type: "integer" pdName: - description: "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" type: "string" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" type: "boolean" required: - "pdName" type: "object" gitRepo: - description: "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." + description: "gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." properties: directory: - description: "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name." + description: "directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name." type: "string" repository: description: "repository is the URL" @@ -4006,35 +4006,35 @@ spec: - "repository" type: "object" glusterfs: - description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md" + description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md" properties: endpoints: - description: "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "string" path: - description: "path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "string" readOnly: - description: "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + description: "readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" type: "boolean" required: - "endpoints" - "path" type: "object" hostPath: - description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write." + description: "hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write." properties: path: - description: "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + description: "path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" type: "string" type: - description: "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + description: "type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" type: "string" required: - "path" type: "object" iscsi: - description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" + description: "iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md" properties: chapAuthDiscovery: description: "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication" @@ -4043,39 +4043,39 @@ spec: description: "chapAuthSession defines whether support iSCSI Session CHAP authentication" type: "boolean" fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" initiatorName: - description: "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection." + description: "initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n: will be created for the connection." type: "string" iqn: description: "iqn is the target iSCSI Qualified Name." type: "string" iscsiInterface: - description: "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp)." + description: "iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp)." type: "string" lun: description: "lun represents iSCSI Target Lun number." format: "int32" type: "integer" portals: - description: "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)." + description: "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." items: type: "string" type: "array" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false." + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false." type: "boolean" secretRef: description: "secretRef is the CHAP Secret for iSCSI target and initiator authentication" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" targetPortal: - description: "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)." + description: "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." type: "string" required: - "iqn" @@ -4083,32 +4083,32 @@ spec: - "targetPortal" type: "object" name: - description: "name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + description: "name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" type: "string" nfs: - description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" properties: path: - description: "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "string" readOnly: - description: "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "boolean" server: - description: "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + description: "server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" type: "string" required: - "path" - "server" type: "object" persistentVolumeClaim: - description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + description: "persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" properties: claimName: - description: "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + description: "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" type: "string" readOnly: - description: "readOnly Will force the ReadOnly setting in VolumeMounts. Default false." + description: "readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false." type: "boolean" required: - "claimName" @@ -4117,7 +4117,7 @@ spec: description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." 
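# Illustrative sketch (not part of the upstream patch): a Pod using the hostPath and nfs
# volume fields described in the schema above. The Pod name, image, and NFS server
# address are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: volume-demo
spec:
  containers:
    - name: app
      image: busybox                  # hypothetical image
      volumeMounts:
        - name: host-logs
          mountPath: /host-logs
        - name: shared
          mountPath: /mnt/shared
  volumes:
    - name: host-logs
      hostPath:
        path: /var/log                # pre-existing directory on the node
        type: Directory
    - name: shared
      nfs:
        server: nfs.example.invalid   # assumed NFS server hostname
        path: /exports/shared
        readOnly: true                # defaults to false when omitted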
type: "string" pdID: description: "pdID is the ID that identifies Photon Controller persistent disk" @@ -4129,10 +4129,10 @@ spec: description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine" properties: fsType: - description: "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" volumeID: description: "volumeID uniquely identifies a Portworx volume" @@ -4144,7 +4144,7 @@ spec: description: "projected items for all in one resources secrets, configmaps, and downward API" properties: defaultMode: - description: "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" sources: @@ -4153,24 +4153,24 @@ spec: description: "Projection that may be projected along with other supported volume types" properties: clusterTrustBundle: - description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. \n Alpha, gated by the ClusterTrustBundleProjection feature gate. \n ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. \n Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time." + description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field\nof ClusterTrustBundle objects in an auto-updating file.\n\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\n\nClusterTrustBundle objects can either be selected by name, or by the\ncombination of signer name and a label selector.\n\n\nKubelet performs aggressive normalization of the PEM contents written\ninto the pod filesystem. Esoteric PEM features such as inter-block\ncomments and block headers are stripped. 
Certificates are deduplicated.\nThe ordering of certificates within the file is arbitrary, and Kubelet\nmay change the order over time." properties: labelSelector: - description: "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\"." + description: "Select all ClusterTrustBundles that match this label selector. Only has\neffect if signerName is set. Mutually-exclusive with name. If unset,\ninterpreted as \"match nothing\". If set but empty, interpreted as \"match\neverything\"." properties: matchExpressions: description: "matchExpressions is a list of label selector requirements. The requirements are ANDed." items: - description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + description: "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." properties: key: description: "key is the label key that the selector applies to." type: "string" operator: - description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + description: "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." type: "string" values: - description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + description: "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch." items: type: "string" type: "array" @@ -4182,21 +4182,21 @@ spec: matchLabels: additionalProperties: type: "string" - description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed." type: "object" type: "object" x-kubernetes-map-type: "atomic" name: - description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector." + description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive\nwith signerName and labelSelector." type: "string" optional: - description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles." + description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s)\naren't available. If using name, then the named ClusterTrustBundle is\nallowed not to exist. 
If using signerName, then the combination of\nsignerName and labelSelector is allowed to match zero\nClusterTrustBundles." type: "boolean" path: description: "Relative path from the volume root to write the bundle." type: "string" signerName: - description: "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated." + description: "Select all ClusterTrustBundles that match this signer name.\nMutually-exclusive with name. The contents of all selected\nClusterTrustBundles will be unified and deduplicated." type: "string" required: - "path" @@ -4205,7 +4205,7 @@ spec: description: "configMap information about the configMap data to project" properties: items: - description: "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4213,11 +4213,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4225,7 +4225,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" 
+ description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "optional specify whether the ConfigMap or its keys must be defined" @@ -4254,14 +4254,14 @@ spec: type: "object" x-kubernetes-map-type: "atomic" mode: - description: "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" type: "string" resourceFieldRef: - description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + description: "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." properties: containerName: description: "Container name: required for volumes, optional for env vars" @@ -4289,7 +4289,7 @@ spec: description: "secret information about the secret data to project" properties: items: - description: "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'." + description: "items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4297,11 +4297,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4309,7 +4309,7 @@ spec: type: "object" type: "array" name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" optional: description: "optional field specify whether the Secret or its key must be defined" @@ -4320,14 +4320,14 @@ spec: description: "serviceAccountToken is information about the serviceAccountToken data to project" properties: audience: - description: "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver." + description: "audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver." type: "string" expirationSeconds: - description: "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes." + description: "expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes." format: "int64" type: "integer" path: - description: "path is the path relative to the mount point of the file to project the token into." + description: "path is the path relative to the mount point of the file to project the\ntoken into." 
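# Illustrative sketch (not part of the upstream patch): a Pod with a projected volume that
# combines a configMap item and a serviceAccountToken source, exercising the mode-bit and
# expiration fields described above. The Pod, ConfigMap, image, and audience are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: projected-demo
spec:
  containers:
    - name: app
      image: busybox                            # hypothetical image
      command: ["sleep", "3600"]
      volumeMounts:
        - name: combined
          mountPath: /etc/projected
  volumes:
    - name: combined
      projected:
        defaultMode: 0440                       # octal in YAML; JSON would need the decimal value 288
        sources:
          - configMap:
              name: app-config                  # hypothetical ConfigMap
              items:
                - key: settings.json
                  path: settings.json
          - serviceAccountToken:
              audience: https://example.invalid # assumed audience identifier
              expirationSeconds: 3600           # defaults to 1 hour; must be at least 600 seconds
              path: token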
type: "string" required: - "path" @@ -4339,19 +4339,19 @@ spec: description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime" properties: group: - description: "group to map volume access to Default is no group" + description: "group to map volume access to\nDefault is no group" type: "string" readOnly: - description: "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false." + description: "readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false." type: "boolean" registry: - description: "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes" + description: "registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" type: "string" tenant: - description: "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin" + description: "tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin" type: "string" user: - description: "user to map volume access to Defaults to serivceaccount user" + description: "user to map volume access to\nDefaults to serivceaccount user" type: "string" volume: description: "volume is a string that references an already created Quobyte volume by name." @@ -4361,38 +4361,38 @@ spec: - "volume" type: "object" rbd: - description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md" + description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md" properties: fsType: - description: "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine" + description: "fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine" type: "string" image: - description: "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" keyring: - description: "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" monitors: - description: "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" items: type: "string" type: "array" pool: - description: "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" readOnly: - description: "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "boolean" secretRef: - description: "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" user: - description: "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + description: "user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" type: "string" required: - "image" @@ -4402,7 +4402,7 @@ spec: description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\"." type: "string" gateway: description: "gateway is the host address of the ScaleIO API Gateway." @@ -4411,13 +4411,13 @@ spec: description: "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage." type: "string" readOnly: - description: "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail." + description: "secretRef references to the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" @@ -4425,7 +4425,7 @@ spec: description: "sslEnabled Flag enable/disable SSL communication with Gateway, default false" type: "boolean" storageMode: - description: "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned." + description: "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned." type: "string" storagePool: description: "storagePool is the ScaleIO Storage Pool associated with the protection domain." @@ -4434,7 +4434,7 @@ spec: description: "system is the name of the storage system as configured in ScaleIO." type: "string" volumeName: - description: "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source." + description: "volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source." type: "string" required: - "gateway" @@ -4442,14 +4442,14 @@ spec: - "system" type: "object" secret: - description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + description: "secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" properties: defaultMode: - description: "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" items: - description: "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'." + description: "items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'." items: description: "Maps a string key to a path within a volume." properties: @@ -4457,11 +4457,11 @@ spec: description: "key is the key to project." type: "string" mode: - description: "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set." + description: "mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set." format: "int32" type: "integer" path: - description: "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'." + description: "path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." type: "string" required: - "key" @@ -4472,38 +4472,38 @@ spec: description: "optional field specify whether the Secret or its keys must be defined" type: "boolean" secretName: - description: "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + description: "secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret" type: "string" type: "object" storageos: description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." properties: fsType: - description: "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" readOnly: - description: "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts." + description: "readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts." type: "boolean" secretRef: - description: "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." 
+ description: "secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted." properties: name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" + description: "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?" type: "string" type: "object" x-kubernetes-map-type: "atomic" volumeName: - description: "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace." + description: "volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." type: "string" volumeNamespace: - description: "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created." + description: "volumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created." type: "string" type: "object" vsphereVolume: description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" properties: fsType: - description: "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + description: "fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." type: "string" storagePolicyID: description: "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName." @@ -4537,22 +4537,22 @@ spec: - "name" x-kubernetes-list-type: "map" priority: - description: "Priority determines the order of access to the resources managed by the ClusterQueue where the workload is queued. The priority value is populated from PriorityClassName. The higher the value, the higher the priority. If priorityClassName is specified, priority must not be null." + description: "Priority determines the order of access to the resources managed by the\nClusterQueue where the workload is queued.\nThe priority value is populated from PriorityClassName.\nThe higher the value, the higher the priority.\nIf priorityClassName is specified, priority must not be null." format: "int32" type: "integer" priorityClassName: - description: "If specified, indicates the workload's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. 
Any other name must be defined by creating a PriorityClass object with that name. If not specified, the workload priority will be default or zero if there is no default." + description: "If specified, indicates the workload's priority.\n\"system-node-critical\" and \"system-cluster-critical\" are two special\nkeywords which indicate the highest priorities with the former being\nthe highest priority. Any other name must be defined by creating a\nPriorityClass object with that name. If not specified, the workload\npriority will be default or zero if there is no default." type: "string" priorityClassSource: default: "" - description: "priorityClassSource determines whether the priorityClass field refers to a pod PriorityClass or kueue.x-k8s.io/workloadpriorityclass. Workload's PriorityClass can accept the name of a pod priorityClass or a workloadPriorityClass. When using pod PriorityClass, a priorityClassSource field has the scheduling.k8s.io/priorityclass value." + description: "priorityClassSource determines whether the priorityClass field refers to a pod PriorityClass or kueue.x-k8s.io/workloadpriorityclass.\nWorkload's PriorityClass can accept the name of a pod priorityClass or a workloadPriorityClass.\nWhen using pod PriorityClass, a priorityClassSource field has the scheduling.k8s.io/priorityclass value." enum: - "kueue.x-k8s.io/workloadpriorityclass" - "scheduling.k8s.io/priorityclass" - "" type: "string" queueName: - description: "queueName is the name of the LocalQueue the Workload is associated with. queueName cannot be changed while .status.admission is not null." + description: "queueName is the name of the LocalQueue the Workload is associated with.\nqueueName cannot be changed while .status.admission is not null." type: "string" required: - "podSets" @@ -4561,7 +4561,7 @@ spec: description: "WorkloadStatus defines the observed state of Workload" properties: admission: - description: "admission holds the parameters of the admission of the workload by a ClusterQueue. admission can be set back to null, but its fields cannot be changed once set." + description: "admission holds the parameters of the admission of the workload by a\nClusterQueue. admission can be set back to null, but its fields cannot be\nchanged once set." properties: clusterQueue: description: "clusterQueue is the name of the ClusterQueue that admitted this workload." @@ -4571,7 +4571,7 @@ spec: items: properties: count: - description: "count is the number of pods taken into account at admission time. This field will not change in case of quota reclaim. Value could be missing for Workloads created before this field was added, in that case spec.podSets[*].count value will be used." + description: "count is the number of pods taken into account at admission time.\nThis field will not change in case of quota reclaim.\nValue could be missing for Workloads created before this field was added,\nin that case spec.podSets[*].count value will be used." format: "int32" minimum: 0.0 type: "integer" @@ -4592,7 +4592,7 @@ spec: - type: "string" pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" x-kubernetes-int-or-string: true - description: "resourceUsage keeps track of the total resources all the pods in the podset need to run. \n Beside what is provided in podSet's specs, this calculation takes into account the LimitRange defaults and RuntimeClass overheads at the moment of admission. This field will not change in case of quota reclaim." 
+ description: "resourceUsage keeps track of the total resources all the pods in the podset need to run.\n\n\nBeside what is provided in podSet's specs, this calculation takes into account\nthe LimitRange defaults and RuntimeClass overheads at the moment of admission.\nThis field will not change in case of quota reclaim." type: "object" required: - "name" @@ -4610,11 +4610,11 @@ spec: items: properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." maxLength: 32768 type: "string" name: @@ -4623,7 +4623,7 @@ spec: type: "string" podSetUpdates: items: - description: "PodSetUpdate contains a list of pod set modifications suggested by AdmissionChecks. The modifications should be additive only - modifications of already existing keys or having the same key provided by multiple AdmissionChecks is not allowed and will result in failure during workload admission." + description: "PodSetUpdate contains a list of pod set modifications suggested by AdmissionChecks.\nThe modifications should be additive only - modifications of already existing keys\nor having the same key provided by multiple AdmissionChecks is not allowed and will\nresult in failure during workload admission." properties: annotations: additionalProperties: @@ -4642,23 +4642,23 @@ spec: type: "object" tolerations: items: - description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator ." + description: "The pod this Toleration is attached to tolerates any taint that matches\nthe triple using the matching operator ." properties: effect: - description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." + description: "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute." type: "string" key: - description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys." + description: "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys." type: "string" operator: - description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category." + description: "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. 
Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category." type: "string" tolerationSeconds: - description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system." + description: "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system." format: "int64" type: "integer" value: - description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string." + description: "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string." type: "string" type: "object" type: "array" @@ -4686,25 +4686,25 @@ spec: - "name" x-kubernetes-list-type: "map" conditions: - description: "conditions hold the latest available observations of the Workload current state. \n The type of the condition could be: \n - Admitted: the Workload was admitted through a ClusterQueue. - Finished: the associated workload finished running (failed or succeeded). - PodsReady: at least `.spec.podSets[*].count` Pods are ready or have succeeded." + description: "conditions hold the latest available observations of the Workload\ncurrent state.\n\n\nThe type of the condition could be:\n\n\n- Admitted: the Workload was admitted through a ClusterQueue.\n- Finished: the associated workload finished running (failed or succeeded).\n- PodsReady: at least `.spec.podSets[*].count` Pods are ready or have\nsucceeded." items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: lastTransitionTime: - description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable." + description: "lastTransitionTime is the last time the condition transitioned from one status to another.\nThis should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." format: "date-time" type: "string" message: - description: "message is a human readable message indicating details about the transition. This may be an empty string." + description: "message is a human readable message indicating details about the transition.\nThis may be an empty string." maxLength: 32768 type: "string" observedGeneration: - description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance." + description: "observedGeneration represents the .metadata.generation that the condition was set based upon.\nFor instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date\nwith respect to the current state of the instance." format: "int64" minimum: 0.0 type: "integer" reason: - description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty." + description: "reason contains a programmatic identifier indicating the reason for the condition's last transition.\nProducers of specific condition types may define expected values and meanings for this field,\nand whether the values are considered a guaranteed API.\nThe value should be a CamelCase string.\nThis field may not be empty." maxLength: 1024 minLength: 1 pattern: "^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$" @@ -4717,7 +4717,7 @@ spec: - "Unknown" type: "string" type: - description: "type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" + description: "type of condition in CamelCase or in foo.example.com/CamelCase.\n---\nMany .condition.type values are consistent across resources like Available, but because arbitrary conditions can be\nuseful (see .node.status.conditions), the ability to deconflict is important.\nThe regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)" maxLength: 316 pattern: "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$" type: "string" @@ -4733,7 +4733,7 @@ spec: - "type" x-kubernetes-list-type: "map" reclaimablePods: - description: "reclaimablePods keeps track of the number pods within a podset for which the resource reservation is no longer needed." + description: "reclaimablePods keeps track of the number pods within a podset for which\nthe resource reservation is no longer needed." 
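# Illustrative sketch (not part of the upstream patch): a minimal kueue.x-k8s.io/v1beta1
# Workload showing the queueName, priority, and podSets fields described above. In practice
# Kueue normally creates Workloads from Jobs; all names and the image here are hypothetical.
apiVersion: kueue.x-k8s.io/v1beta1
kind: Workload
metadata:
  name: sample-workload
spec:
  queueName: user-queue                                 # LocalQueue the Workload is associated with
  priorityClassName: high-priority                      # assumed pre-existing PriorityClass
  priorityClassSource: scheduling.k8s.io/priorityclass
  podSets:
    - name: main
      count: 1
      template:
        spec:
          restartPolicy: Never
          containers:
            - name: job
              image: busybox                            # hypothetical image
              resources:
                requests:
                  cpu: "1"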
items: properties: count: @@ -4752,6 +4752,19 @@ spec: x-kubernetes-list-map-keys: - "name" x-kubernetes-list-type: "map" + requeueState: + description: "requeueState holds the re-queue state\nwhen a workload meets Eviction with PodsReadyTimeout reason." + properties: + count: + description: "count records the number of times a workload has been re-queued\nWhen a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),\nthis count would be reset to null." + format: "int32" + minimum: 0.0 + type: "integer" + requeueAt: + description: "requeueAt records the time when a workload will be re-queued.\nWhen a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),\nthis time would be reset to null." + format: "date-time" + type: "string" + type: "object" type: "object" type: "object" served: true diff --git a/crd-catalog/kubev2v/forklift/forklift.konveyor.io/v1beta1/hosts.yaml b/crd-catalog/kubev2v/forklift/forklift.konveyor.io/v1beta1/hosts.yaml index 8f32eeb64..164f00213 100644 --- a/crd-catalog/kubev2v/forklift/forklift.konveyor.io/v1beta1/hosts.yaml +++ b/crd-catalog/kubev2v/forklift/forklift.konveyor.io/v1beta1/hosts.yaml @@ -102,9 +102,6 @@ spec: type: "string" type: "object" x-kubernetes-map-type: "atomic" - thumbprint: - description: "Certificate SHA-1 fingerprint, called thumbprint by VMware." - type: "string" type: description: "Type used to qualify the name." type: "string" diff --git a/crd-catalog/mariadb-operator/mariadb-operator/mariadb.mmontes.io/v1alpha1/mariadbs.yaml b/crd-catalog/mariadb-operator/mariadb-operator/mariadb.mmontes.io/v1alpha1/mariadbs.yaml index bb964c7d7..5e790058e 100644 --- a/crd-catalog/mariadb-operator/mariadb-operator/mariadb.mmontes.io/v1alpha1/mariadbs.yaml +++ b/crd-catalog/mariadb-operator/mariadb-operator/mariadb.mmontes.io/v1alpha1/mariadbs.yaml @@ -1767,7 +1767,7 @@ spec: description: "Replication configures high availability via Galera." properties: agent: - description: "GaleraAgent is a sidecar agent that co-operates with mariadb-operator. More info: https://github.com/mariadb-operator/agent." + description: "GaleraAgent is a sidecar agent that co-operates with mariadb-operator." properties: args: description: "Args to be used in the Container." @@ -2021,7 +2021,6 @@ spec: type: "integer" type: "object" port: - default: 5555 description: "Port where the agent will be listening for connections." format: "int32" type: "integer" @@ -3592,7 +3591,7 @@ spec: type: "integer" type: "object" maxScale: - description: "MaxScale is the MaxScale specification that defines the MaxScale instance that will be used with MariaDB. When enabling this field, MaxScaleRef is automatically set." + description: "MaxScale is the MaxScale specification that defines the MaxScale resource to be used with the current MariaDB. When enabling this field, MaxScaleRef is automatically set." properties: admin: description: "Admin configures the admin REST API and GUI." @@ -4214,7 +4213,7 @@ spec: params: additionalProperties: type: "string" - description: "Params is a key value pair of parameters to be used in the MaxScale static configuration file." + description: "Params is a key value pair of parameters to be used in the MaxScale static configuration file. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#global-settings." 
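# Illustrative sketch (not part of the upstream patch): passing extra MaxScale global settings
# through the config params map described above. The nesting (spec.maxScale.config.params) and
# the parameter key are assumptions based on the field descriptions, not verified API paths.
apiVersion: mariadb.mmontes.io/v1alpha1
kind: MariaDB
metadata:
  name: mariadb-demo
spec:
  maxScale:
    enabled: true
    config:
      params:
        log_info: "true"   # assumed MaxScale global setting; any supported parameter may be used here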
type: "object" sync: description: "Sync defines how to replicate configuration across MaxScale replicas. It is defaulted when HA is enabled." @@ -4427,7 +4426,7 @@ spec: type: "string" type: "object" enabled: - description: "Enabled is a flag to enable Metrics" + description: "Enabled is a flag to enable a MaxScale instance to be used with the current MariaDB." type: "boolean" env: description: "Env represents the environment variables to be injected in a container." @@ -5209,7 +5208,7 @@ spec: type: "integer" type: "object" monitor: - description: "Monitor monitors MariaDB server instances. It is required if 'spec.mariaDbRef' is not provided." + description: "Monitor monitors MariaDB server instances." properties: cooperativeMonitoring: description: "CooperativeMonitoring enables coordination between multiple MaxScale instances running monitors. It is defaulted when HA is enabled." @@ -5229,7 +5228,7 @@ spec: params: additionalProperties: type: "string" - description: "Params defines extra parameters to pass to the monitor." + description: "Params defines extra parameters to pass to the monitor. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-common-monitor-parameters/. Monitor specific parameter are also suported: https://mariadb.com/kb/en/mariadb-maxscale-2308-galera-monitor/#galera-monitor-optional-parameters. https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-monitor/#configuration." type: "object" suspend: description: "Suspend indicates whether the current resource should be suspended or not. Feature flag --feature-maxscale-suspend is required in the controller to enable this." @@ -5450,12 +5449,11 @@ spec: type: "integer" type: "object" replicas: - default: 1 description: "Replicas indicates the number of desired instances." format: "int32" type: "integer" requeueInterval: - description: "RequeueInterval is used to perform requeue reconcilizations. If not defined, it defaults to 10s." + description: "RequeueInterval is used to perform requeue reconcilizations." type: "string" resources: description: "Resouces describes the compute resource requirements." @@ -5585,7 +5583,7 @@ spec: description: "ServiceAccountName is the name of the ServiceAccount to be used by the Pods." type: "string" services: - description: "Services define how the traffic is forwarded to the MariaDB servers. It is defaulted if not provided." + description: "Services define how the traffic is forwarded to the MariaDB servers." items: description: "Services define how the traffic is forwarded to the MariaDB servers." properties: @@ -5598,7 +5596,7 @@ spec: params: additionalProperties: type: "string" - description: "Params defines extra parameters to pass to the listener." + description: "Params defines extra parameters to pass to the listener. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#listener_1." type: "object" port: description: "Port is the network port where the MaxScale server will listen." @@ -5617,7 +5615,7 @@ spec: params: additionalProperties: type: "string" - description: "Params defines extra parameters to pass to the monitor." + description: "Params defines extra parameters to pass to the monitor. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#service_1. 
Router specific parameter are also suported: https://mariadb.com/kb/en/mariadb-maxscale-2308-readwritesplit/#configuration. https://mariadb.com/kb/en/mariadb-maxscale-2308-readconnroute/#configuration." type: "object" router: description: "Router is the type of router to use." @@ -7228,7 +7226,7 @@ spec: type: "array" type: "object" maxScaleRef: - description: "MaxScaleRef is a reference to a MaxScale instance that is forwarding the traffic to the current MariaDB instance. Providing this field implies delegating high availability tasks such as primary failover to MaxScale." + description: "MaxScaleRef is a reference to a MaxScale resource to be used with the current MariaDB. Providing this field implies delegating high availability tasks such as primary failover to MaxScale." properties: apiVersion: description: "API version of the referent." diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml index 86cda3e66..25b892d6e 100644 --- a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml +++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1alpha1/flowcollectors.yaml @@ -92,8 +92,8 @@ spec: type: "string" type: "array" kafkaBatchSize: - default: 10485760 - description: "kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB." + default: 1048576 + description: "kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB." type: "integer" logLevel: default: "info" diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml index 89f8e7cc8..e78419dcb 100644 --- a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml +++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta1/flowcollectors.yaml @@ -60,7 +60,7 @@ spec: minimum: 1.0 type: "integer" debug: - description: "`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk." + description: "`debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk." properties: env: additionalProperties: @@ -99,8 +99,8 @@ spec: type: "string" type: "array" kafkaBatchSize: - default: 10485760 - description: "`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB." + default: 1048576 + description: "`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB." type: "integer" logLevel: default: "info" @@ -115,7 +115,7 @@ spec: - "panic" type: "string" privileged: - description: "Privileged mode for the eBPF Agent container. 
When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`)." + description: "Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support." type: "boolean" resources: default: @@ -575,7 +575,7 @@ spec: type: "object" enable: default: true - description: "Enables the console plugin deployment. `spec.Loki.enable` must also be `true`" + description: "Enables the console plugin deployment. `spec.loki.enable` must also be `true`" type: "boolean" imagePullPolicy: default: "IfNotPresent" @@ -1214,7 +1214,7 @@ spec: description: "`conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows." type: "string" debug: - description: "`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk." + description: "`debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk." properties: env: additionalProperties: diff --git a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml index 72a9bd544..50871567c 100644 --- a/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml +++ b/crd-catalog/netobserv/network-observability-operator/flows.netobserv.io/v1beta2/flowcollectors.yaml @@ -49,7 +49,7 @@ spec: description: "`ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`." properties: advanced: - description: "`advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk." + description: "`advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk." properties: env: additionalProperties: @@ -99,8 +99,8 @@ spec: type: "string" type: "array" kafkaBatchSize: - default: 10485760 - description: "`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB." 
+ default: 1048576 + description: "`kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB." type: "integer" logLevel: default: "info" @@ -115,7 +115,7 @@ spec: - "panic" type: "string" privileged: - description: "Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`)." + description: "Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support." type: "boolean" resources: default: @@ -228,7 +228,7 @@ spec: description: "`consolePlugin` defines the settings related to the OpenShift Console plugin, when available." properties: advanced: - description: "`advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk." + description: "`advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk." properties: args: description: "`args` allows passing custom arguments to underlying components. Useful for overriding some parameters, such as an url or a configuration path, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios." @@ -600,7 +600,7 @@ spec: type: "object" enable: default: true - description: "Enables the console plugin deployment. `spec.Loki.enable` must also be `true`" + description: "Enables the console plugin deployment. `spec.loki.enable` must also be `true`" type: "boolean" imagePullPolicy: default: "IfNotPresent" @@ -1049,7 +1049,7 @@ spec: description: "Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation." type: "boolean" lokiStack: - description: "Loki configuration for \"LokiStack\" mode. This is useful for an easy loki-operator configuration. It is ignored for other modes." + description: "Loki configuration for `LokiStack` mode. This is useful for an easy loki-operator configuration. It is ignored for other modes." properties: name: default: "loki" @@ -1060,7 +1060,7 @@ spec: type: "string" type: "object" manual: - description: "Loki configuration for \"Manual\" mode. This is the most flexible configuration. It is ignored for other modes." + description: "Loki configuration for `Manual` mode. This is the most flexible configuration. It is ignored for other modes." 
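Taken together, the v1beta2 changes above could look like the following FlowCollector sketch (the resource name and the choice of `LokiStack` mode are assumptions for illustration; `kafkaBatchSize` only has an effect when Kafka is part of the deployment model):

apiVersion: flows.netobserv.io/v1beta2
kind: FlowCollector
metadata:
  name: cluster                 # illustrative; a single cluster-scoped instance is expected
spec:
  agent:
    type: eBPF
    ebpf:
      privileged: false         # rely on granular capabilities rather than full privileges
      kafkaBatchSize: 1048576   # 1MB, matching the new default; ignored without Kafka
  loki:
    enable: true
    mode: LokiStack             # Loki managed by the Loki Operator
    lokiStack:
      name: loki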
properties: authToken: default: "Disabled" @@ -1203,7 +1203,7 @@ spec: type: "object" type: "object" microservices: - description: "Loki configuration for \"Microservices\" mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes." + description: "Loki configuration for `Microservices` mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes." properties: ingesterUrl: default: "http://loki-distributor:3100/" @@ -1278,7 +1278,7 @@ spec: type: "object" mode: default: "Monolithic" - description: "`mode` must be set according to the installation mode of Loki:
- Use \"LokiStack\" when Loki is managed using the Loki Operator
- Use \"Monolithic\" when Loki is installed as a monolithic workload
- Use \"Microservices\" when Loki is installed as microservices, but without Loki Operator
- Use \"Manual\" if none of the options above match your setup
" + description: "`mode` must be set according to the installation mode of Loki:
- Use `LokiStack` when Loki is managed using the Loki Operator
- Use `Monolithic` when Loki is installed as a monolithic workload
- Use `Microservices` when Loki is installed as microservices, but without Loki Operator
- Use `Manual` if none of the options above match your setup
" enum: - "Manual" - "LokiStack" @@ -1286,7 +1286,7 @@ spec: - "Microservices" type: "string" monolithic: - description: "Loki configuration for \"Monolithic\" mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes." + description: "Loki configuration for `Monolithic` mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes." properties: tenantID: default: "netobserv" @@ -1385,7 +1385,7 @@ spec: description: "`addZone` allows availability zone awareness by labelling flows with their source and destination zones. This feature requires the \"topology.kubernetes.io/zone\" label to be set on nodes." type: "boolean" advanced: - description: "`advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk." + description: "`advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk." properties: conversationEndTimeout: default: "10s" diff --git a/crd-catalog/open-telemetry/opentelemetry-operator/opentelemetry.io/v1alpha1/opentelemetrycollectors.yaml b/crd-catalog/open-telemetry/opentelemetry-operator/opentelemetry.io/v1alpha1/opentelemetrycollectors.yaml index 3cf86eebe..9b1841785 100644 --- a/crd-catalog/open-telemetry/opentelemetry-operator/opentelemetry.io/v1alpha1/opentelemetrycollectors.yaml +++ b/crd-catalog/open-telemetry/opentelemetry-operator/opentelemetry.io/v1alpha1/opentelemetrycollectors.yaml @@ -3688,10 +3688,11 @@ spec: type: "object" allocationStrategy: default: "consistent-hashing" - description: "AllocationStrategy determines which strategy the target allocator should use for allocation. The current options are least-weighted and consistent-hashing. The default option is consistent-hashing" + description: "AllocationStrategy determines which strategy the target allocator should use for allocation. The current options are least-weighted, consistent-hashing and per-node. The default is consistent-hashing." enum: - "least-weighted" - "consistent-hashing" + - "per-node" type: "string" enabled: description: "Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not." diff --git a/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml index f5d48fd13..fd3a7ed37 100644 --- a/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml +++ b/crd-catalog/openshift/api/config.openshift.io/v1/infrastructures.yaml @@ -434,6 +434,12 @@ spec: maxLength: 2048 pattern: "^/.*?/host/.*?/Resources.*" type: "string" + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. 
\n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: "^/.*?/vm/.*?" + type: "string" required: - "computeCluster" - "datacenter" diff --git a/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml index d689b1827..8d1bb6def 100644 --- a/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml +++ b/crd-catalog/openshift/api/ingress.operator.openshift.io/v1/dnsrecords.yaml @@ -3,6 +3,7 @@ kind: "CustomResourceDefinition" metadata: annotations: api-approved.openshift.io: "https://github.com/openshift/api/pull/584" + capability.openshift.io/name: "Ingress" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/crd-catalog/openshift/api/machineconfiguration.openshift.io/v1/controllerconfigs.yaml b/crd-catalog/openshift/api/machineconfiguration.openshift.io/v1/controllerconfigs.yaml index 0f6a6851f..e563b78c6 100644 --- a/crd-catalog/openshift/api/machineconfiguration.openshift.io/v1/controllerconfigs.yaml +++ b/crd-catalog/openshift/api/machineconfiguration.openshift.io/v1/controllerconfigs.yaml @@ -604,6 +604,12 @@ spec: maxLength: 2048 pattern: "^/.*?/host/.*?/Resources.*" type: "string" + template: + description: "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. \n When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: "^/.*?/vm/.*?" + type: "string" required: - "computeCluster" - "datacenter" diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml index 77ee377b3..192649f3a 100644 --- a/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/clustercsidrivers.yaml @@ -60,7 +60,7 @@ spec: properties: kmsKeyARN: description: "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key." 
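The pattern change just below widens the accepted ARN partitions; an illustrative value (the resource name is assumed to be the AWS EBS driver instance, and the account and key IDs are made up) would be:

apiVersion: operator.openshift.io/v1
kind: ClusterCSIDriver
metadata:
  name: ebs.csi.aws.com
spec:
  driverConfig:
    driverType: AWS
    aws:
      kmsKeyARN: arn:aws-iso:kms:us-iso-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab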
- pattern: "^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\\/.*$" + pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\\/.*$" type: "string" type: "object" azure: diff --git a/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml index 35a0a84a4..6e2fe68e0 100644 --- a/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml +++ b/crd-catalog/openshift/api/operator.openshift.io/v1/ingresscontrollers.yaml @@ -3,6 +3,7 @@ kind: "CustomResourceDefinition" metadata: annotations: api-approved.openshift.io: "https://github.com/openshift/api/pull/616" + capability.openshift.io/name: "Ingress" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" diff --git a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeployments.yaml b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeployments.yaml index 33f7bcad0..f26a6dd18 100644 --- a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeployments.yaml +++ b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeployments.yaml @@ -373,24 +373,6 @@ spec: required: - "agentSelector" type: "object" - alibabacloud: - description: "AlibabaCloud is the configuration used when installing on Alibaba Cloud" - properties: - credentialsSecretRef: - description: "CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials." - properties: - name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" - type: "string" - type: "object" - x-kubernetes-map-type: "atomic" - region: - description: "Region specifies the Alibaba Cloud region where the cluster will be created." - type: "string" - required: - - "credentialsSecretRef" - - "region" - type: "object" aws: description: "AWS is the configuration used when installing on AWS." properties: diff --git a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeprovisions.yaml b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeprovisions.yaml index 71bcd8cfa..b3a235649 100644 --- a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeprovisions.yaml +++ b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterdeprovisions.yaml @@ -59,28 +59,6 @@ spec: platform: description: "Platform contains platform-specific configuration for a ClusterDeprovision" properties: - alibabacloud: - description: "AlibabaCloud contains Alibaba Cloud specific deprovision settings" - properties: - baseDomain: - description: "BaseDomain is the DNS base domain. TODO: Use the non-platform-specific BaseDomain field." - type: "string" - credentialsSecretRef: - description: "CredentialsSecretRef is the Alibaba account credentials to use for deprovisioning the cluster" - properties: - name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" 
- type: "string" - type: "object" - x-kubernetes-map-type: "atomic" - region: - description: "Region is the Alibaba region for this deprovision" - type: "string" - required: - - "baseDomain" - - "credentialsSecretRef" - - "region" - type: "object" aws: description: "AWS contains AWS-specific deprovision settings" properties: diff --git a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterpools.yaml b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterpools.yaml index c6a5d43dc..4af60d149 100644 --- a/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterpools.yaml +++ b/crd-catalog/openshift/hive/hive.openshift.io/v1/clusterpools.yaml @@ -170,24 +170,6 @@ spec: required: - "agentSelector" type: "object" - alibabacloud: - description: "AlibabaCloud is the configuration used when installing on Alibaba Cloud" - properties: - credentialsSecretRef: - description: "CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials." - properties: - name: - description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?" - type: "string" - type: "object" - x-kubernetes-map-type: "atomic" - region: - description: "Region specifies the Alibaba Cloud region where the cluster will be created." - type: "string" - required: - - "credentialsSecretRef" - - "region" - type: "object" aws: description: "AWS is the configuration used when installing on AWS." properties: diff --git a/crd-catalog/openshift/hive/hive.openshift.io/v1/machinepools.yaml b/crd-catalog/openshift/hive/hive.openshift.io/v1/machinepools.yaml index f05712d33..d8762c27a 100644 --- a/crd-catalog/openshift/hive/hive.openshift.io/v1/machinepools.yaml +++ b/crd-catalog/openshift/hive/hive.openshift.io/v1/machinepools.yaml @@ -73,32 +73,6 @@ spec: platform: description: "Platform is configuration for machine pool specific to the platform." properties: - alibabacloud: - description: "AlibabaCloud is the configuration used when installing on Alibaba Cloud." - properties: - imageID: - description: "ImageID is the Image ID that should be used to create ECS instance. If set, the ImageID should belong to the same region as the cluster." - type: "string" - instanceType: - description: "InstanceType defines the ECS instance type. eg. ecs.g6.large" - type: "string" - systemDiskCategory: - description: "SystemDiskCategory defines the category of the system disk." - enum: - - "" - - "cloud_efficiency" - - "cloud_essd" - type: "string" - systemDiskSize: - description: "SystemDiskSize defines the size of the system disk in gibibytes (GiB)." - minimum: 120.0 - type: "integer" - zones: - description: "Zones is list of availability zones that can be used. eg. [\"cn-hangzhou-i\", \"cn-hangzhou-h\", \"cn-hangzhou-j\"]" - items: - type: "string" - type: "array" - type: "object" aws: description: "AWS is the configuration used when installing on AWS." 
properties: diff --git a/crd-catalog/projectcalico/calico/crd.projectcalico.org/v1/felixconfigurations.yaml b/crd-catalog/projectcalico/calico/crd.projectcalico.org/v1/felixconfigurations.yaml index 5a316c889..6f1dc2acb 100644 --- a/crd-catalog/projectcalico/calico/crd.projectcalico.org/v1/felixconfigurations.yaml +++ b/crd-catalog/projectcalico/calico/crd.projectcalico.org/v1/felixconfigurations.yaml @@ -102,7 +102,7 @@ spec: - "Disabled" type: "string" bpfKubeProxyEndpointSlicesEnabled: - description: "BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's embedded kube-proxy accepts EndpointSlices or not." + description: "BPFKubeProxyEndpointSlicesEnabled is deprecated and has no effect. BPF kube-proxy always accepts endpoint slices. This option will be removed in the next release." type: "boolean" bpfKubeProxyIptablesCleanupEnabled: description: "BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream Kubernetes kube-proxy's iptables chains. Should only be enabled if kube-proxy is not running. [Default: true]" diff --git a/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourconfigurations.yaml b/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourconfigurations.yaml index 1fc8ab20c..611b7bdc4 100644 --- a/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourconfigurations.yaml +++ b/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourconfigurations.yaml @@ -330,11 +330,8 @@ spec: gateway: description: "Gateway contains parameters for the gateway-api Gateway that Contour\nis configured to serve traffic." properties: - controllerName: - description: "ControllerName is used to determine whether Contour should reconcile a\nGatewayClass. The string takes the form of \"projectcontour.io//contour\".\nIf unset, the gatewayclass controller will not be started.\nExactly one of ControllerName or GatewayRef must be set.\nDeprecated: users should use GatewayRef, or the Gateway provisioner,\nin place of this field. This field will be removed in a future release." - type: "string" gatewayRef: - description: "GatewayRef defines a specific Gateway that this Contour\ninstance corresponds to. If set, Contour will reconcile\nonly this gateway, and will not reconcile any gateway\nclasses.\nExactly one of ControllerName or GatewayRef must be set." + description: "GatewayRef defines the specific Gateway that this Contour\ninstance corresponds to." properties: name: type: "string" @@ -344,6 +341,8 @@ spec: - "name" - "namespace" type: "object" + required: + - "gatewayRef" type: "object" globalExtAuth: description: "GlobalExternalAuthorization allows envoys external authorization filter\nto be enabled for all virtual hosts." diff --git a/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourdeployments.yaml b/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourdeployments.yaml index a89a48cd1..83cd880a1 100644 --- a/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourdeployments.yaml +++ b/crd-catalog/projectcontour/contour/projectcontour.io/v1alpha1/contourdeployments.yaml @@ -1675,11 +1675,8 @@ spec: gateway: description: "Gateway contains parameters for the gateway-api Gateway that Contour\nis configured to serve traffic." properties: - controllerName: - description: "ControllerName is used to determine whether Contour should reconcile a\nGatewayClass. 
The string takes the form of \"projectcontour.io//contour\".\nIf unset, the gatewayclass controller will not be started.\nExactly one of ControllerName or GatewayRef must be set.\nDeprecated: users should use GatewayRef, or the Gateway provisioner,\nin place of this field. This field will be removed in a future release." - type: "string" gatewayRef: - description: "GatewayRef defines a specific Gateway that this Contour\ninstance corresponds to. If set, Contour will reconcile\nonly this gateway, and will not reconcile any gateway\nclasses.\nExactly one of ControllerName or GatewayRef must be set." + description: "GatewayRef defines the specific Gateway that this Contour\ninstance corresponds to." properties: name: type: "string" @@ -1689,6 +1686,8 @@ spec: - "name" - "namespace" type: "object" + required: + - "gatewayRef" type: "object" globalExtAuth: description: "GlobalExternalAuthorization allows envoys external authorization filter\nto be enabled for all virtual hosts." diff --git a/crd-catalog/prometheus-operator/prometheus-operator/monitoring.coreos.com/v1/servicemonitors.yaml b/crd-catalog/prometheus-operator/prometheus-operator/monitoring.coreos.com/v1/servicemonitors.yaml index f8b1eee69..8977a95ed 100644 --- a/crd-catalog/prometheus-operator/prometheus-operator/monitoring.coreos.com/v1/servicemonitors.yaml +++ b/crd-catalog/prometheus-operator/prometheus-operator/monitoring.coreos.com/v1/servicemonitors.yaml @@ -360,7 +360,7 @@ spec: anyOf: - type: "integer" - type: "string" - description: "Name or number of the target port of the `Pod` object behind the Service, the port must be specified with container port property. \n Deprecated: use `port` instead." + description: "Name or number of the target port of the `Pod` object behind the Service. The port must be specified with the container's port property." x-kubernetes-int-or-string: true tlsConfig: description: "TLS configuration to use when scraping the target." diff --git a/crd-catalog/quay/quay-operator/quay.redhat.com/v1/quayregistries.yaml b/crd-catalog/quay/quay-operator/quay.redhat.com/v1/quayregistries.yaml index 2df7d2f7e..14bbb6623 100644 --- a/crd-catalog/quay/quay-operator/quay.redhat.com/v1/quayregistries.yaml +++ b/crd-catalog/quay/quay-operator/quay.redhat.com/v1/quayregistries.yaml @@ -609,6 +609,28 @@ spec: replicas: format: "int32" type: "integer" + resources: + description: "Resources describes the resource limits and requests for a component." + properties: + limits: + additionalProperties: + anyOf: + - type: "integer" + - type: "string" + pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" + x-kubernetes-int-or-string: true + description: "ResourceList is a set of (resource name, quantity) pairs." + type: "object" + requests: + additionalProperties: + anyOf: + - type: "integer" + - type: "string" + pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$" + x-kubernetes-int-or-string: true + description: "ResourceList is a set of (resource name, quantity) pairs." 
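Assuming the new `resources` block sits in a per-component override next to `replicas` and `volumeSize`, as the surrounding schema suggests, a QuayRegistry sketch might look like this (component choice and resource values are illustrative):

apiVersion: quay.redhat.com/v1
kind: QuayRegistry
metadata:
  name: example-registry       # hypothetical name
spec:
  components:
    - kind: clair
      managed: true
      overrides:
        replicas: 2
        resources:
          requests:
            cpu: 500m
            memory: 2Gi
          limits:
            cpu: "2"
            memory: 4Gi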
+ type: "object" + type: "object" volumeSize: anyOf: - type: "integer" diff --git a/crd-catalog/rook/rook/ceph.rook.io/v1/cephblockpools.yaml b/crd-catalog/rook/rook/ceph.rook.io/v1/cephblockpools.yaml index 4fd516a05..4b2d2d434 100644 --- a/crd-catalog/rook/rook/ceph.rook.io/v1/cephblockpools.yaml +++ b/crd-catalog/rook/rook/ceph.rook.io/v1/cephblockpools.yaml @@ -33,6 +33,9 @@ spec: spec: description: "NamedBlockPoolSpec allows a block pool to be created with a non-default name." properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: @@ -114,7 +117,7 @@ spec: name: description: "The desired name of the pool if different from the CephBlockPool CR name." enum: - - "device_health_metrics" + - ".rgw.root" - ".nfs" - ".mgr" type: "string" diff --git a/crd-catalog/rook/rook/ceph.rook.io/v1/cephclusters.yaml b/crd-catalog/rook/rook/ceph.rook.io/v1/cephclusters.yaml index 6f7baad31..1faac3f87 100644 --- a/crd-catalog/rook/rook/ceph.rook.io/v1/cephclusters.yaml +++ b/crd-catalog/rook/rook/ceph.rook.io/v1/cephclusters.yaml @@ -1447,6 +1447,8 @@ spec: x-kubernetes-validations: - message: "at least one network selector must be specified when using multus" rule: "!has(self.provider) || (self.provider != 'multus' || (self.provider == 'multus' && size(self.selectors) > 0))" + - message: "the legacy hostNetwork setting can only be set if the network.provider is set to the empty string" + rule: "!has(self.hostNetwork) || self.hostNetwork == false || !has(self.provider) || self.provider == \"\"" placement: additionalProperties: description: "Placement is the placement for an object" diff --git a/crd-catalog/rook/rook/ceph.rook.io/v1/cephfilesystems.yaml b/crd-catalog/rook/rook/ceph.rook.io/v1/cephfilesystems.yaml index 351a016c4..3baacb513 100644 --- a/crd-catalog/rook/rook/ceph.rook.io/v1/cephfilesystems.yaml +++ b/crd-catalog/rook/rook/ceph.rook.io/v1/cephfilesystems.yaml @@ -45,6 +45,9 @@ spec: items: description: "NamedPoolSpec represents the named ceph pool spec" properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: @@ -213,6 +216,9 @@ spec: description: "The metadata pool settings" nullable: true properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: diff --git a/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectstores.yaml b/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectstores.yaml index 90d095aa4..18bd9f942 100644 --- a/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectstores.yaml +++ b/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectstores.yaml @@ -42,6 +42,9 @@ spec: description: "The data pool settings" nullable: true properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: @@ -1165,6 +1168,9 @@ spec: description: "The metadata pool settings" nullable: true properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." 
+ type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: diff --git a/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectzones.yaml b/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectzones.yaml index 65ec988e1..749ce565d 100644 --- a/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectzones.yaml +++ b/crd-catalog/rook/rook/ceph.rook.io/v1/cephobjectzones.yaml @@ -43,6 +43,9 @@ spec: description: "The data pool settings" nullable: true properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: @@ -206,6 +209,9 @@ spec: description: "The metadata pool settings" nullable: true properties: + application: + description: "The application name to set on the pool. Only expected to be set for rgw pools." + type: "string" compressionMode: description: "DEPRECATED: use Parameters instead, e.g." enum: diff --git a/crd-catalog/solo-io/gloo/gateway.solo.io/v1/gateways.yaml b/crd-catalog/solo-io/gloo/gateway.solo.io/v1/gateways.yaml index 68eaf1b34..55e087b3b 100644 --- a/crd-catalog/solo-io/gloo/gateway.solo.io/v1/gateways.yaml +++ b/crd-catalog/solo-io/gloo/gateway.solo.io/v1/gateways.yaml @@ -1222,6 +1222,11 @@ spec: type: "boolean" enableXRatelimitHeaders: type: "boolean" + grpcService: + properties: + authority: + type: "string" + type: "object" rateLimitBeforeAuth: type: "boolean" ratelimitServerRef: @@ -3101,6 +3106,11 @@ spec: type: "boolean" enableXRatelimitHeaders: type: "boolean" + grpcService: + properties: + authority: + type: "string" + type: "object" rateLimitBeforeAuth: type: "boolean" ratelimitServerRef: diff --git a/crd-catalog/solo-io/gloo/gateway.solo.io/v1/httpgateways.yaml b/crd-catalog/solo-io/gloo/gateway.solo.io/v1/httpgateways.yaml index 69ba9735f..2e8bf8362 100644 --- a/crd-catalog/solo-io/gloo/gateway.solo.io/v1/httpgateways.yaml +++ b/crd-catalog/solo-io/gloo/gateway.solo.io/v1/httpgateways.yaml @@ -1217,6 +1217,11 @@ spec: type: "boolean" enableXRatelimitHeaders: type: "boolean" + grpcService: + properties: + authority: + type: "string" + type: "object" rateLimitBeforeAuth: type: "boolean" ratelimitServerRef: diff --git a/crd-catalog/solo-io/gloo/gloo.solo.io/v1/settings.yaml b/crd-catalog/solo-io/gloo/gloo.solo.io/v1/settings.yaml index 40d4f5a77..7b074ebb0 100644 --- a/crd-catalog/solo-io/gloo/gloo.solo.io/v1/settings.yaml +++ b/crd-catalog/solo-io/gloo/gloo.solo.io/v1/settings.yaml @@ -848,6 +848,11 @@ spec: type: "boolean" enableXRatelimitHeaders: type: "boolean" + grpcService: + properties: + authority: + type: "string" + type: "object" rateLimitBeforeAuth: type: "boolean" ratelimitServerRef: diff --git a/crd-catalog/wildfly/wildfly-operator/wildfly.org/v1alpha1/wildflyservers.yaml b/crd-catalog/wildfly/wildfly-operator/wildfly.org/v1alpha1/wildflyservers.yaml index 015f3683f..bd0892c18 100644 --- a/crd-catalog/wildfly/wildfly-operator/wildfly.org/v1alpha1/wildflyservers.yaml +++ b/crd-catalog/wildfly/wildfly-operator/wildfly.org/v1alpha1/wildflyservers.yaml @@ -168,6 +168,156 @@ spec: minItems: 1 type: "array" x-kubernetes-list-type: "atomic" + livenessProbe: + description: "LivenessProbe defines the periodic probe of container liveness. Container will be restarted if the probe fails." + properties: + exec: + description: "Exec specifies a command action to take." 
+ properties: + command: + description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." + items: + type: "string" + type: "array" + type: "object" + failureThreshold: + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + httpGet: + description: "HTTPGet specifies the http request to perform." + properties: + host: + description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + type: "string" + httpHeaders: + description: "Custom headers to set in the request. HTTP allows repeated headers." + items: + description: "HTTPHeader describes a custom header to be used in HTTP probes" + properties: + name: + description: "The header field name" + type: "string" + value: + description: "The header field value" + type: "string" + required: + - "name" + - "value" + type: "object" + type: "array" + path: + description: "Path to access on the HTTP server." + type: "string" + port: + anyOf: + - type: "integer" + - type: "string" + description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + x-kubernetes-int-or-string: true + scheme: + description: "Scheme to use for connecting to the host. Defaults to HTTP." + type: "string" + required: + - "port" + type: "object" + initialDelaySeconds: + description: "Number of seconds after the container has started before probes are initiated. It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0." + format: "int32" + minimum: 0.0 + type: "integer" + periodSeconds: + description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + successThreshold: + description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + timeoutSeconds: + description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + format: "int32" + minimum: 1.0 + type: "integer" + type: "object" + readinessProbe: + description: "ReadinessProbe defines the periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails." + properties: + exec: + description: "Exec specifies a command action to take." + properties: + command: + description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." 
+ items: + type: "string" + type: "array" + type: "object" + failureThreshold: + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + httpGet: + description: "HTTPGet specifies the http request to perform." + properties: + host: + description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + type: "string" + httpHeaders: + description: "Custom headers to set in the request. HTTP allows repeated headers." + items: + description: "HTTPHeader describes a custom header to be used in HTTP probes" + properties: + name: + description: "The header field name" + type: "string" + value: + description: "The header field value" + type: "string" + required: + - "name" + - "value" + type: "object" + type: "array" + path: + description: "Path to access on the HTTP server." + type: "string" + port: + anyOf: + - type: "integer" + - type: "string" + description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + x-kubernetes-int-or-string: true + scheme: + description: "Scheme to use for connecting to the host. Defaults to HTTP." + type: "string" + required: + - "port" + type: "object" + initialDelaySeconds: + description: "Number of seconds after the container has started before probes are initiated. It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0." + format: "int32" + minimum: 0.0 + type: "integer" + periodSeconds: + description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + successThreshold: + description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + timeoutSeconds: + description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + format: "int32" + minimum: 1.0 + type: "integer" + type: "object" replicas: description: "Replicas is the desired number of replicas for the application" format: "int32" @@ -305,6 +455,81 @@ spec: required: - "name" type: "object" + startupProbe: + description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation." + properties: + exec: + description: "Exec specifies a command action to take." + properties: + command: + description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy." 
+ items: + type: "string" + type: "array" + type: "object" + failureThreshold: + description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + httpGet: + description: "HTTPGet specifies the http request to perform." + properties: + host: + description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead." + type: "string" + httpHeaders: + description: "Custom headers to set in the request. HTTP allows repeated headers." + items: + description: "HTTPHeader describes a custom header to be used in HTTP probes" + properties: + name: + description: "The header field name" + type: "string" + value: + description: "The header field value" + type: "string" + required: + - "name" + - "value" + type: "object" + type: "array" + path: + description: "Path to access on the HTTP server." + type: "string" + port: + anyOf: + - type: "integer" + - type: "string" + description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + x-kubernetes-int-or-string: true + scheme: + description: "Scheme to use for connecting to the host. Defaults to HTTP." + type: "string" + required: + - "port" + type: "object" + initialDelaySeconds: + description: "Number of seconds after the container has started before probes are initiated. It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0." + format: "int32" + minimum: 0.0 + type: "integer" + periodSeconds: + description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + successThreshold: + description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1." + format: "int32" + minimum: 1.0 + type: "integer" + timeoutSeconds: + description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + format: "int32" + minimum: 1.0 + type: "integer" + type: "object" storage: description: "StorageSpec defines specific storage required for the server own data directory. If omitted, an EmptyDir is used (that will not persist data across pod restart)." 
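With the new probe fields in place, a WildFlyServer could override the operator's probe defaults roughly as follows (the image, health paths, and port are illustrative assumptions; the schema above only defines the `exec`/`httpGet` handlers and the timing fields):

apiVersion: wildfly.org/v1alpha1
kind: WildFlyServer
metadata:
  name: example-app            # hypothetical name
spec:
  applicationImage: quay.io/example/my-app:latest   # hypothetical image
  replicas: 2
  startupProbe:
    httpGet:
      path: /health/started    # assumed health endpoint
      port: 9990               # assumed management port
    failureThreshold: 30
    periodSeconds: 10
  livenessProbe:
    httpGet:
      path: /health/live
      port: 9990
    initialDelaySeconds: 60
  readinessProbe:
    httpGet:
      path: /health/ready
      port: 9990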
properties: diff --git a/crd-catalog/zalando/postgres-operator/acid.zalan.do/v1/postgresqls.yaml b/crd-catalog/zalando/postgres-operator/acid.zalan.do/v1/postgresqls.yaml index 266009954..a52160f26 100644 --- a/crd-catalog/zalando/postgres-operator/acid.zalan.do/v1/postgresqls.yaml +++ b/crd-catalog/zalando/postgres-operator/acid.zalan.do/v1/postgresqls.yaml @@ -574,6 +574,11 @@ spec: nullable: true type: "array" type: "object" + usersIgnoringSecretRotation: + items: + type: "string" + nullable: true + type: "array" usersWithInPlaceSecretRotation: items: type: "string" diff --git a/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/mod.rs b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/mod.rs index c464a3de5..fbf74dd15 100644 --- a/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/mod.rs +++ b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/mod.rs @@ -1,2 +1,3 @@ +pub mod v1alpha3; pub mod v1alpha4; pub mod v1beta1; diff --git a/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesetbindings.rs b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesetbindings.rs new file mode 100644 index 000000000..9489e952e --- /dev/null +++ b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesetbindings.rs @@ -0,0 +1,55 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesetbindings.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// ClusterResourceSetBindingSpec defines the desired state of ClusterResourceSetBinding. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "addons.cluster.x-k8s.io", version = "v1alpha3", kind = "ClusterResourceSetBinding", plural = "clusterresourcesetbindings")] +#[kube(namespaced)] +#[kube(status = "ClusterResourceSetBindingStatus")] +#[kube(schema = "disabled")] +pub struct ClusterResourceSetBindingSpec { + /// Bindings is a list of ClusterResourceSets and their resources. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub bindings: Option>, +} + +/// ResourceSetBinding keeps info on all of the resources in a ClusterResourceSet. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetBindingBindings { + /// ClusterResourceSetName is the name of the ClusterResourceSet that is applied to the owner cluster of the binding. + #[serde(rename = "clusterResourceSetName")] + pub cluster_resource_set_name: String, + /// Resources is a list of resources that the ClusterResourceSet has. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option>, +} + +/// ResourceBinding shows the status of a resource that belongs to a ClusterResourceSet matched by the owner cluster of the ClusterResourceSetBinding object. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetBindingBindingsResources { + /// Applied is to track if a resource is applied to the cluster or not. + pub applied: bool, + /// Hash is the hash of a resource's data. This can be used to decide if a resource is changed. + /// For "ApplyOnce" ClusterResourceSet.spec.strategy, this is no-op as that strategy does not act on change. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub hash: Option, + /// Kind of the resource. Supported kinds are: Secrets and ConfigMaps. + pub kind: ClusterResourceSetBindingBindingsResourcesKind, + /// LastAppliedTime identifies when this resource was last applied to the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastAppliedTime")] + pub last_applied_time: Option, + /// Name of the resource that is in the same namespace with ClusterResourceSet object. + pub name: String, +} + +/// ResourceBinding shows the status of a resource that belongs to a ClusterResourceSet matched by the owner cluster of the ClusterResourceSetBinding object. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterResourceSetBindingBindingsResourcesKind { + Secret, + ConfigMap, +} + diff --git a/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesets.rs b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesets.rs new file mode 100644 index 000000000..6be889e4c --- /dev/null +++ b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/clusterresourcesets.rs @@ -0,0 +1,124 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/addons.cluster.x-k8s.io/v1alpha3/clusterresourcesets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// ClusterResourceSetSpec defines the desired state of ClusterResourceSet. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "addons.cluster.x-k8s.io", version = "v1alpha3", kind = "ClusterResourceSet", plural = "clusterresourcesets")] +#[kube(namespaced)] +#[kube(status = "ClusterResourceSetStatus")] +#[kube(schema = "disabled")] +pub struct ClusterResourceSetSpec { + /// Label selector for Clusters. The Clusters that are + /// selected by this will be the ones affected by this ClusterResourceSet. + /// It must match the Cluster labels. This field is immutable. + #[serde(rename = "clusterSelector")] + pub cluster_selector: ClusterResourceSetClusterSelector, + /// Resources is a list of Secrets/ConfigMaps where each contains 1 or more resources to be applied to remote clusters. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option>, + /// Strategy is the strategy to be used during applying resources. Defaults to ApplyOnce. This field is immutable. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub strategy: Option, +} + +/// Label selector for Clusters. The Clusters that are +/// selected by this will be the ones affected by this ClusterResourceSet. +/// It must match the Cluster labels. This field is immutable. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetClusterSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetClusterSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// ResourceRef specifies a resource. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetResources { + /// Kind of the resource. Supported kinds are: Secrets and ConfigMaps. + pub kind: ClusterResourceSetResourcesKind, + /// Name of the resource that is in the same namespace with ClusterResourceSet object. + pub name: String, +} + +/// ResourceRef specifies a resource. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterResourceSetResourcesKind { + Secret, + ConfigMap, +} + +/// ClusterResourceSetSpec defines the desired state of ClusterResourceSet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ClusterResourceSetStrategy { + ApplyOnce, +} + +/// ClusterResourceSetStatus defines the observed state of ClusterResourceSet. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetStatus { + /// Conditions defines current state of the ClusterResourceSet. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// ObservedGeneration reflects the generation of the most recently observed ClusterResourceSet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterResourceSetStatusConditions { + /// Last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when + /// the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. + /// This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. + /// The specific API may choose whether or not this field is considered a guaranteed API. + /// This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately + /// understand the current situation and act accordingly. + /// The Severity field MUST be set only when Status=False. 
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub severity: Option<String>,
+    /// Status of the condition, one of True, False, Unknown.
+    pub status: String,
+    /// Type of condition in CamelCase or in foo.example.com/CamelCase.
+    /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+    /// can be useful (see .node.status.conditions), the ability to deconflict is important.
+    #[serde(rename = "type")]
+    pub r#type: String,
+}
+
diff --git a/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/mod.rs b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/mod.rs
new file mode 100644
index 000000000..12b985470
--- /dev/null
+++ b/kube-custom-resources-rs/src/addons_cluster_x_k8s_io/v1alpha3/mod.rs
@@ -0,0 +1,2 @@
+pub mod clusterresourcesetbindings;
+pub mod clusterresourcesets;
diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/apis.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/apis.rs
index 6310d8e40..7ee52cf66 100644
--- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/apis.rs
+++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/apis.rs
@@ -6,8 +6,10 @@ use kube::CustomResource;
 use serde::{Serialize, Deserialize};
 use std::collections::BTreeMap;
 
-/// ApiSpec defines the desired state of Api.
-/// Represents an API.
+/// ApiSpec defines the desired state of Api.
+///
+///
+/// Represents an API.
 #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "API", plural = "apis")]
 #[kube(namespaced)]
@@ -20,7 +22,9 @@ pub struct APISpec {
     pub basepath: Option<String>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub body: Option<String>,
-    /// Represents a CORS configuration. Supported only for HTTP APIs. See Configuring CORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html) for more information.
+    /// Represents a CORS configuration. Supported only for HTTP APIs. See Configuring
+    /// CORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html)
+    /// for more information.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "corsConfiguration")]
     pub cors_configuration: Option<APICorsConfiguration>,
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "credentialsARN")]
     pub credentials_arn: Option<String>,
@@ -49,7 +53,9 @@ pub struct APISpec {
     pub version: Option<String>,
 }
 
-/// Represents a CORS configuration. Supported only for HTTP APIs. See Configuring CORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html) for more information.
+/// Represents a CORS configuration. Supported only for HTTP APIs. See Configuring
+/// CORS (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html)
+/// for more information.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct APICorsConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowCredentials")] @@ -74,7 +80,9 @@ pub struct APICorsConfiguration { /// APIStatus defines the observed state of API #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct APIStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiEndpoint")] @@ -83,7 +91,10 @@ pub struct APIStatus { pub api_gateway_managed: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiID")] pub api_id: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdDate")] @@ -94,20 +105,32 @@ pub struct APIStatus { pub warnings: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct APIStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct APIStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/authorizers.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/authorizers.rs index c71b68ef5..4f90163ed 100644 --- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/authorizers.rs +++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/authorizers.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// AuthorizerSpec defines the desired state of Authorizer. -/// Represents an authorizer. +/// AuthorizerSpec defines the desired state of Authorizer. +/// +/// +/// Represents an authorizer. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "Authorizer", plural = "authorizers")] #[kube(namespaced)] @@ -15,8 +17,14 @@ use serde::{Serialize, Deserialize}; pub struct AuthorizerSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiID")] pub api_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiRef")] pub api_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "authorizerCredentialsARN")] @@ -35,29 +43,39 @@ pub struct AuthorizerSpec { pub identity_source: Vec, #[serde(default, skip_serializing_if = "Option::is_none", rename = "identityValidationExpression")] pub identity_validation_expression: Option, - /// Represents the configuration of a JWT authorizer. Required for the JWT authorizer type. Supported only for HTTP APIs. + /// Represents the configuration of a JWT authorizer. Required for the JWT authorizer + /// type. Supported only for HTTP APIs. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "jwtConfiguration")] pub jwt_configuration: Option, pub name: String, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerApiRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerApiRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// Represents the configuration of a JWT authorizer. Required for the JWT authorizer type. Supported only for HTTP APIs. +/// Represents the configuration of a JWT authorizer. Required for the JWT authorizer +/// type. Supported only for HTTP APIs. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerJwtConfiguration { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -70,30 +88,47 @@ pub struct AuthorizerJwtConfiguration { /// AuthorizerStatus defines the observed state of Authorizer #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "authorizerID")] pub authorizer_id: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync 
state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AuthorizerStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/deployments.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/deployments.rs index e81a2be24..cb27fbebd 100644 --- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/deployments.rs +++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/deployments.rs @@ -5,8 +5,11 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DeploymentSpec defines the desired state of Deployment. -/// An immutable representation of an API that can be called by users. A Deployment must be associated with a Stage for it to be callable over the internet. +/// DeploymentSpec defines the desired state of Deployment. +/// +/// +/// An immutable representation of an API that can be called by users. A Deployment +/// must be associated with a Stage for it to be callable over the internet. 
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "Deployment", plural = "deployments")] #[kube(namespaced)] @@ -15,8 +18,14 @@ use serde::{Serialize, Deserialize}; pub struct DeploymentSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiID")] pub api_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiRef")] pub api_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -25,16 +34,24 @@ pub struct DeploymentSpec { pub stage_name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DeploymentApiRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DeploymentApiRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -44,12 +61,17 @@ pub struct DeploymentApiRefFrom { /// DeploymentStatus defines the observed state of Deployment #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DeploymentStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoDeployed")] pub auto_deployed: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that 
describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdDate")] @@ -62,20 +84,32 @@ pub struct DeploymentStatus { pub deployment_status_message: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DeploymentStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DeploymentStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/routes.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/routes.rs index 0c4968a4f..2fd6ac9f3 100644 --- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/routes.rs +++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/routes.rs @@ -6,8 +6,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// RouteSpec defines the desired state of Route. -/// Represents a route. +/// RouteSpec defines the desired state of Route. 
+/// +/// +/// Represents a route. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "Route", plural = "routes")] #[kube(namespaced)] @@ -18,8 +20,14 @@ pub struct RouteSpec { pub api_id: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiKeyRequired")] pub api_key_required: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiRef")] pub api_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "authorizationScopes")] @@ -28,8 +36,14 @@ pub struct RouteSpec { pub authorization_type: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "authorizerID")] pub authorizer_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "authorizerRef")] pub authorizer_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelSelectionExpression")] @@ -46,38 +60,60 @@ pub struct RouteSpec { pub route_response_selection_expression: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub target: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetRef")] pub target_ref: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteApiRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the 
identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteApiRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteAuthorizerRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteAuthorizerRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -90,16 +126,24 @@ pub struct RouteRequestParameters { pub required: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTargetRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTargetRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -109,32 +153,49 @@ pub struct RouteTargetRefFrom { /// RouteStatus defines the observed state of Route #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account 
ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGatewayManaged")] pub api_gateway_managed: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeID")] pub route_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteStatusConditions { /// Last time the condition transitioned from one status to another. 
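Editor's note: the `apiRef`/`authorizerRef`/`targetRef` wrappers documented throughout the apigatewayv2 hunks above all share one shape, an optional `from` block whose `name` points at another CR in the cluster instead of a raw AWS identifier. The following is a minimal sketch of how that reads from Rust, not part of the generated patch; it assumes the modules compile under the crate layout shown in this patch and that the elided generics are the usual kopium output (`Option<String>`, `Option<RouteApiRef>`, and so on), and the names `example-route` and `my-api` are purely illustrative.

use kube_custom_resources_rs::apigatewayv2_services_k8s_aws::v1alpha1::routes::{
    Route, RouteApiRef, RouteApiRefFrom, RouteSpec,
};

// Point the Route at its parent Api by Kubernetes name via the `from:` wrapper,
// rather than hard-coding an apiID copied out of AWS. Any required RouteSpec
// fields not shown in the hunks above would still need real values.
fn example_route() -> Route {
    let spec = RouteSpec {
        api_ref: Some(RouteApiRef {
            from: Some(RouteApiRefFrom {
                name: Some("my-api".to_string()),
            }),
        }),
        ..RouteSpec::default()
    };
    Route::new("example-route", spec)
}

The same construction applies to the Authorizer, Deployment, and Stage variants of the wrapper elsewhere in this patch; only the generated type names differ.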
diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/stages.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/stages.rs index 5cf4218fa..dacf78fc3 100644 --- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/stages.rs +++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/stages.rs @@ -6,8 +6,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// StageSpec defines the desired state of Stage. -/// Represents an API stage. +/// StageSpec defines the desired state of Stage. +/// +/// +/// Represents an API stage. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "Stage", plural = "stages")] #[kube(namespaced)] @@ -19,8 +21,14 @@ pub struct StageSpec { pub access_log_settings: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiID")] pub api_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiRef")] pub api_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoDeploy")] @@ -32,8 +40,14 @@ pub struct StageSpec { pub default_route_settings: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "deploymentID")] pub deployment_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "deploymentRef")] pub deployment_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -59,16 +73,24 @@ pub struct StageAccessLogSettings { pub format: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageApiRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference 
provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageApiRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -91,16 +113,24 @@ pub struct StageDefaultRouteSettings { pub throttling_rate_limit: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageDeploymentRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageDeploymentRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -125,12 +155,17 @@ pub struct StageRouteSettings { /// StageStatus defines the observed state of Stage #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGatewayManaged")] pub api_gateway_managed: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdDate")] @@ -141,20 +176,32 @@ pub struct StageStatus { pub last_updated_date: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the 
resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StageStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/vpclinks.rs b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/vpclinks.rs index c94e8b8cf..b26259e1a 100644 --- a/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/vpclinks.rs +++ b/kube-custom-resources-rs/src/apigatewayv2_services_k8s_aws/v1alpha1/vpclinks.rs @@ -6,8 +6,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// VpcLinkSpec defines the desired state of VpcLink. -/// Represents a VPC link. +/// VpcLinkSpec defines the desired state of VpcLink. +/// +/// +/// Represents a VPC link. 
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "apigatewayv2.services.k8s.aws", version = "v1alpha1", kind = "VPCLink", plural = "vpclinks")] #[kube(namespaced)] @@ -26,10 +28,15 @@ pub struct VPCLinkSpec { /// VPCLinkStatus defines the observed state of VPCLink #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCLinkStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdDate")] @@ -44,20 +51,32 @@ pub struct VPCLinkStatus { pub vpc_link_version: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCLinkStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. 
#[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCLinkStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/app_redislabs_com/v1/redisenterpriseclusters.rs b/kube-custom-resources-rs/src/app_redislabs_com/v1/redisenterpriseclusters.rs index df65fc776..cb91c19a7 100644 --- a/kube-custom-resources-rs/src/app_redislabs_com/v1/redisenterpriseclusters.rs +++ b/kube-custom-resources-rs/src/app_redislabs_com/v1/redisenterpriseclusters.rs @@ -150,6 +150,9 @@ pub struct RedisEnterpriseClusterSpec { /// Redis upgrade policy to be set on the Redis Enterprise Cluster. Possible values: major/latest This value is used by the cluster to choose the Redis version of the database when an upgrade is performed. The Redis Enterprise Cluster includes multiple versions of OSS Redis that can be used for databases. #[serde(default, skip_serializing_if = "Option::is_none", rename = "redisUpgradePolicy")] pub redis_upgrade_policy: Option, + /// Whether databases will turn on RESP3 compatibility upon database upgrade. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resp3Default")] + pub resp3_default: Option, /// Name of the service account to use #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountName")] pub service_account_name: Option, diff --git a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseactiveactivedatabases.rs b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseactiveactivedatabases.rs index 420092f92..fd573119f 100644 --- a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseactiveactivedatabases.rs +++ b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseactiveactivedatabases.rs @@ -89,6 +89,9 @@ pub struct RedisEnterpriseActiveActiveDatabaseGlobalConfigurations { /// In-memory database replication. When enabled, database will have replica shard for every master - leading to higher availability. #[serde(default, skip_serializing_if = "Option::is_none")] pub replication: Option, + /// Whether this database supports RESP3 protocol. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resp3: Option, /// The size of the RAM portion of an RoF database. Similarly to "memorySize" use formats like 100MB, 0.1GB It must be at least 10% of combined memory size (RAM+Flash), as specified by "memorySize". 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "rofRamSize")] pub rof_ram_size: Option, @@ -98,6 +101,9 @@ pub struct RedisEnterpriseActiveActiveDatabaseGlobalConfigurations { /// Number of database server-side shards #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardCount")] pub shard_count: Option, + /// Toggles database sharding for REAADBs (Active Active databases) and enabled by default. This field is blocked for REDB (non-Active Active databases) and sharding is toggled via the shardCount field - when shardCount is 1 this is disabled otherwise enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardingEnabled")] + pub sharding_enabled: Option, /// Control the density of shards - should they reside on as few or as many nodes as possible. Available options are "dense" or "sparse". If left unset, defaults to "dense". #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardsPlacement")] pub shards_placement: Option, diff --git a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseclusters.rs b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseclusters.rs index 50f9e57bc..b61a9678e 100644 --- a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseclusters.rs +++ b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterpriseclusters.rs @@ -107,6 +107,8 @@ pub struct RedisEnterpriseClusterSpec { pub redis_on_flash_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "redisUpgradePolicy")] pub redis_upgrade_policy: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resp3Default")] + pub resp3_default: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountName")] pub service_account_name: Option, #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterprisedatabases.rs b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterprisedatabases.rs index 5c4c31c8f..26760d109 100644 --- a/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterprisedatabases.rs +++ b/kube-custom-resources-rs/src/app_redislabs_com/v1alpha1/redisenterprisedatabases.rs @@ -76,6 +76,9 @@ pub struct RedisEnterpriseDatabaseSpec { /// In-memory database replication. When enabled, database will have replica shard for every master - leading to higher availability. #[serde(default, skip_serializing_if = "Option::is_none")] pub replication: Option, + /// Whether this database supports RESP3 protocol. Note - Deleting this property after explicitly setting its value shall have no effect. Please view the corresponding field in RS doc for more info. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resp3: Option, /// The size of the RAM portion of an RoF database. Similarly to "memorySize" use formats like 100MB, 0.1GB. It must be at least 10% of combined memory size (RAM and Flash), as specified by "memorySize". #[serde(default, skip_serializing_if = "Option::is_none", rename = "rofRamSize")] pub rof_ram_size: Option, @@ -85,6 +88,9 @@ pub struct RedisEnterpriseDatabaseSpec { /// Number of database server-side shards #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardCount")] pub shard_count: Option, + /// Toggles database sharding for REAADBs (Active Active databases) and enabled by default. 
This field is blocked for REDB (non-Active Active databases) and sharding is toggled via the shardCount field - when shardCount is 1 this is disabled otherwise enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardingEnabled")] + pub sharding_enabled: Option, /// Control the density of shards - should they reside on as few or as many nodes as possible. Available options are "dense" or "sparse". If left unset, defaults to "dense". #[serde(default, skip_serializing_if = "Option::is_none", rename = "shardsPlacement")] pub shards_placement: Option, diff --git a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/agentpools.rs b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/agentpools.rs index 1f948d472..77525c842 100644 --- a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/agentpools.rs +++ b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/agentpools.rs @@ -304,9 +304,15 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityPreferredDuringSchedul /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. + /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, + /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, @@ -318,7 +324,7 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityPreferredDuringSchedul pub topology_key: String, } -/// A label query over a set of resources, in this case pods. +/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -367,9 +373,15 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityPreferredDuringSchedul /// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. + /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, + /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". 
An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, @@ -381,7 +393,7 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityRequiredDuringScheduli pub topology_key: String, } -/// A label query over a set of resources, in this case pods. +/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -451,9 +463,15 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityPreferredDuringSch /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. + /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, + /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, @@ -465,7 +483,7 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityPreferredDuringSch pub topology_key: String, } -/// A label query over a set of resources, in this case pods. +/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -514,9 +532,15 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityPreferredDuringSch /// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. + /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, + /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] + pub match_label_keys: Option>, + /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] + pub mismatch_label_keys: Option>, /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, @@ -528,7 +552,7 @@ pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityRequiredDuringSche pub topology_key: String, } -/// A label query over a set of resources, in this case pods. +/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -615,6 +639,9 @@ pub struct AgentPoolAgentDeploymentSpecContainers { /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, + /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] + pub restart_policy: Option, /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, @@ -783,6 +810,9 @@ pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePostStart { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -824,6 +854,13 @@ pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePostStartHttpGetHttpHe pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePostStartSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePostStartTcpSocket { @@ -843,6 +880,9 @@ pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePreStop { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -884,6 +924,13 @@ pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePreStopHttpGetHttpHead pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePreStopSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecContainersLifecyclePreStopTcpSocket { @@ -1201,7 +1248,7 @@ pub struct AgentPoolAgentDeploymentSpecContainersSecurityContextSeLinuxOptions { /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, /// type indicates which kind of seccomp profile will be applied. Valid options are: @@ -1219,7 +1266,7 @@ pub struct AgentPoolAgentDeploymentSpecContainersSecurityContextWindowsOptions { /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. @@ -1417,6 +1464,9 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainers { /// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, + /// Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] + pub restart_policy: Option, /// Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, @@ -1589,6 +1639,9 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePostStart { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -1630,6 +1683,13 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePostStartHttp pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePostStartSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePostStartTcpSocket { @@ -1649,6 +1709,9 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePreStop { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -1690,6 +1753,13 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePreStopHttpGe pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePreStopSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecEphemeralContainersLifecyclePreStopTcpSocket { @@ -2007,7 +2077,7 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersSecurityContextSeLinux /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecEphemeralContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, /// type indicates which kind of seccomp profile will be applied. Valid options are: @@ -2025,7 +2095,7 @@ pub struct AgentPoolAgentDeploymentSpecEphemeralContainersSecurityContextWindows /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. @@ -2217,6 +2287,9 @@ pub struct AgentPoolAgentDeploymentSpecInitContainers { /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, + /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] + pub restart_policy: Option, /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, @@ -2385,6 +2458,9 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePostStart { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -2426,6 +2502,13 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePostStartHttpGetHt pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePostStartSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePostStartTcpSocket { @@ -2445,6 +2528,9 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStop { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, + /// Sleep represents the duration that the container should sleep before being terminated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub sleep: Option, /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, @@ -2486,6 +2572,13 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStopHttpGetHttp pub value: String, } +/// Sleep represents the duration that the container should sleep before being terminated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStopSleep { + /// Seconds is the number of seconds to sleep. + pub seconds: i64, +} + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStopTcpSocket { @@ -2803,7 +2896,7 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersSecurityContextSeLinuxOptio /// The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecInitContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, /// type indicates which kind of seccomp profile will be applied. Valid options are: @@ -2821,7 +2914,7 @@ pub struct AgentPoolAgentDeploymentSpecInitContainersSecurityContextWindowsOptio /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. @@ -2987,8 +3080,7 @@ pub struct AgentPoolAgentDeploymentSpecResourceClaimsSource { #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimName")] pub resource_claim_name: Option, /// ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. - /// The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long). - /// An existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. 
Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed. + /// The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. /// This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimTemplateName")] pub resource_claim_template_name: Option, @@ -3058,7 +3150,7 @@ pub struct AgentPoolAgentDeploymentSpecSecurityContextSeLinuxOptions { /// The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, /// type indicates which kind of seccomp profile will be applied. Valid options are: @@ -3085,7 +3177,7 @@ pub struct AgentPoolAgentDeploymentSpecSecurityContextWindowsOptions { /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
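As a usage illustration of the new container fields introduced in the hunks above (the sidecar-style restartPolicy on init containers and the lifecycle sleep handler), a minimal Rust sketch follows. It is not part of the generated patch: it assumes restart_policy is generated as Option<String>, that these structs derive Default as the Sleep types above do, and that the intermediate lifecycle/pre_stop wiring (an AgentPoolAgentDeploymentSpecInitContainersLifecycle struct, not visible in this excerpt) mirrors the upstream Kubernetes LifecycleHandler layout.

// Illustrative sketch only; names marked "assumed" are not visible in the
// hunks above and are inferred from the upstream Kubernetes PodSpec shape.
use kube_custom_resources_rs::app_terraform_io::v1alpha2::agentpools::{
    AgentPoolAgentDeploymentSpecInitContainers,
    AgentPoolAgentDeploymentSpecInitContainersLifecycle, // assumed wrapper struct name
    AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStop,
    AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStopSleep,
};

fn sidecar_init_container() -> AgentPoolAgentDeploymentSpecInitContainers {
    AgentPoolAgentDeploymentSpecInitContainers {
        // "Always" turns this init container into a sidecar-style container,
        // per the restartPolicy documentation added above (assumed Option<String>).
        restart_policy: Some("Always".to_string()),
        // New in this update: sleep for a fixed number of seconds before termination.
        lifecycle: Some(AgentPoolAgentDeploymentSpecInitContainersLifecycle { // assumed field
            pre_stop: Some(AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStop {
                sleep: Some(AgentPoolAgentDeploymentSpecInitContainersLifecyclePreStopSleep {
                    seconds: 5,
                }),
                ..Default::default()
            }),
            ..Default::default()
        }),
        ..Default::default()
    }
}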
@@ -3549,6 +3641,9 @@ pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpec { /// storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassName")] pub storage_class_name: Option, + /// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeAttributesClassName")] + pub volume_attributes_class_name: Option, /// volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMode")] pub volume_mode: Option, @@ -3587,11 +3682,6 @@ pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpecDa /// resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpecResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub claims: Option>, /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, @@ -3600,13 +3690,6 @@ pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpecRe pub requests: Option>, } -/// ResourceClaim references one entry in PodSpec.ResourceClaims. -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpecResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
- pub name: String, -} - /// selector is a label query over volumes to consider for binding. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecVolumesEphemeralVolumeClaimTemplateSpecSelector { @@ -3846,6 +3929,12 @@ pub struct AgentPoolAgentDeploymentSpecVolumesProjected { /// Projection that may be projected along with other supported volume types #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSources { + /// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. + /// Alpha, gated by the ClusterTrustBundleProjection feature gate. + /// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. + /// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterTrustBundle")] + pub cluster_trust_bundle: Option, /// configMap information about the configMap data to project #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMap")] pub config_map: Option, @@ -3860,6 +3949,51 @@ pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSources { pub service_account_token: Option, } +/// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. +/// Alpha, gated by the ClusterTrustBundleProjection feature gate. +/// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. +/// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSourcesClusterTrustBundle { + /// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] + pub label_selector: Option, + /// Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub optional: Option, + /// Relative path from the volume root to write the bundle. + pub path: String, + /// Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. 
The contents of all selected ClusterTrustBundles will be unified and deduplicated. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "signerName")] + pub signer_name: Option, +} + +/// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSourcesClusterTrustBundleLabelSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSourcesClusterTrustBundleLabelSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + /// configMap information about the configMap data to project #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AgentPoolAgentDeploymentSpecVolumesProjectedSourcesConfigMap { diff --git a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/modules.rs b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/modules.rs index e2d11a833..db6bc51d7 100644 --- a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/modules.rs +++ b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/modules.rs @@ -88,7 +88,7 @@ pub struct ModuleVariables { /// Workspace to execute the module. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModuleWorkspace { - /// Module Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$ + /// Module Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Module Workspace Name. diff --git a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/workspaces.rs b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/workspaces.rs index 1b2b7e64d..61940b827 100644 --- a/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/workspaces.rs +++ b/kube-custom-resources-rs/src/app_terraform_io/v1alpha2/workspaces.rs @@ -61,7 +61,7 @@ pub struct WorkspaceSpec { /// Terraform variables for all plans and applies in this workspace. 
Variables defined within a workspace always overwrite variables from variable sets that have the same type and the same key. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/variables - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/variables#terraform-variables #[serde(default, skip_serializing_if = "Option::is_none", rename = "terraformVariables")] pub terraform_variables: Option>, - /// The version of Terraform to use for this workspace. If not specified, the latest available version will be used. Must match pattern: ^\\d{1}\\.\\d{1,2}\\.\\d{1,2}$ More information: - https://www.terraform.io/cloud-docs/workspaces/settings#terraform-version + /// The version of Terraform to use for this workspace. If not specified, the latest available version will be used. Must match pattern: `^\\d{1}\\.\\d{1,2}\\.\\d{1,2}$` More information: - https://www.terraform.io/cloud-docs/workspaces/settings#terraform-version #[serde(default, skip_serializing_if = "Option::is_none", rename = "terraformVersion")] pub terraform_version: Option, /// API Token to be used for API calls. @@ -77,7 +77,7 @@ pub struct WorkspaceSpec { /// Terraform Cloud Agents allow Terraform Cloud to communicate with isolated, private, or on-premises infrastructure. More information: - https://developer.hashicorp.com/terraform/cloud-docs/agents #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceAgentPool { - /// Agent Pool ID. Must match pattern: ^apool-[a-zA-Z0-9]+$ + /// Agent Pool ID. Must match pattern: `^apool-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Agent Pool name. @@ -167,7 +167,7 @@ pub struct WorkspaceNotifications { /// The type of the notification. Must be one of the following values: `email`, `generic`, `microsoft-teams`, `slack`. #[serde(rename = "type")] pub r#type: WorkspaceNotificationsType, - /// The URL of the notification. Must match pattern: ^https?://.* + /// The URL of the notification. Must match pattern: `^https?://.*` #[serde(default, skip_serializing_if = "Option::is_none")] pub url: Option, } @@ -188,7 +188,7 @@ pub enum WorkspaceNotificationsType { /// Projects let you organize your workspaces into groups. Default: default organization project. More information: - https://developer.hashicorp.com/terraform/tutorials/cloud/projects #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceProject { - /// Project ID. Must match pattern: ^prj-[a-zA-Z0-9]+$ + /// Project ID. Must match pattern: `^prj-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Project name. @@ -210,7 +210,7 @@ pub struct WorkspaceRemoteStateSharing { /// ConsumerWorkspace allows access to the state for specific workspaces within the same organization. Only one of the fields `ID` or `Name` is allowed. At least one of the fields `ID` or `Name` is mandatory. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/state#remote-state-access-controls #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceRemoteStateSharingWorkspaces { - /// Consumer Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$ + /// Consumer Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Consumer Workspace name. @@ -224,7 +224,7 @@ pub struct WorkspaceRunTasks { /// Run Task Enforcement Level. Can be one of `advisory` or `mandatory`. 
Default: `advisory`. Must be one of the following values: `advisory`, `mandatory` Default: `advisory`. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enforcementLevel")] pub enforcement_level: Option, - /// Run Task ID. Must match pattern: ^task-[a-zA-Z0-9]+$ + /// Run Task ID. Must match pattern: `^task-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Run Task Name. @@ -238,7 +238,7 @@ pub struct WorkspaceRunTasks { /// RunTrigger allows you to connect this workspace to one or more source workspaces. These connections allow runs to queue automatically in this workspace on successful apply of runs in any of the source workspaces. Only one of the fields `ID` or `Name` is allowed. At least one of the fields `ID` or `Name` is mandatory. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/settings/run-triggers #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceRunTriggers { - /// Source Workspace ID. Must match pattern: ^ws-[a-zA-Z0-9]+$ + /// Source Workspace ID. Must match pattern: `^ws-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Source Workspace Name. @@ -249,7 +249,7 @@ pub struct WorkspaceRunTriggers { /// SSH key used to clone Terraform modules. More information: - https://developer.hashicorp.com/terraform/cloud-docs/workspaces/settings/ssh-keys #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceSshKey { - /// SSH key ID. Must match pattern: ^sshkey-[a-zA-Z0-9]+$ + /// SSH key ID. Must match pattern: `^sshkey-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// SSH key name. @@ -295,7 +295,7 @@ pub struct WorkspaceTeamAccessCustom { /// Team to grant access. More information: - https://developer.hashicorp.com/terraform/cloud-docs/users-teams-organizations/teams #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkspaceTeamAccessTeam { - /// Team ID. Must match pattern: ^team-[a-zA-Z0-9]+$ + /// Team ID. Must match pattern: `^team-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, /// Team name. @@ -389,10 +389,10 @@ pub struct WorkspaceVersionControl { /// The repository branch that Run will execute from. This defaults to the repository's default branch (e.g. main). #[serde(default, skip_serializing_if = "Option::is_none")] pub branch: Option, - /// The VCS Connection (OAuth Connection + Token) to use. Must match pattern: ^ot-[a-zA-Z0-9]+$ + /// The VCS Connection (OAuth Connection + Token) to use. Must match pattern: `^ot-[a-zA-Z0-9]+$` #[serde(default, skip_serializing_if = "Option::is_none", rename = "oAuthTokenID")] pub o_auth_token_id: Option, - /// A reference to your VCS repository in the format / where and refer to the organization and repository in your VCS provider. + /// A reference to your VCS repository in the format `/` where `` and `` refer to the organization and repository in your VCS provider. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub repository: Option, } diff --git a/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalabletargets.rs b/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalabletargets.rs index 00414edc5..bd5665dbb 100644 --- a/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalabletargets.rs +++ b/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalabletargets.rs @@ -5,87 +5,265 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// ScalableTargetSpec defines the desired state of ScalableTarget. -/// Represents a scalable target. +/// ScalableTargetSpec defines the desired state of ScalableTarget. +/// +/// +/// Represents a scalable target. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "applicationautoscaling.services.k8s.aws", version = "v1alpha1", kind = "ScalableTarget", plural = "scalabletargets")] #[kube(namespaced)] #[kube(status = "ScalableTargetStatus")] #[kube(schema = "disabled")] pub struct ScalableTargetSpec { - /// The maximum value that you plan to scale out to. When a scaling policy is in effect, Application Auto Scaling can scale out (expand) as needed to the maximum capacity limit in response to changing demand. This property is required when registering a new scalable target. - /// Although you can specify a large maximum capacity, note that service quotas may impose lower limits. Each service has its own default quotas for the maximum capacity of the resource. If you want to specify a higher limit, you can request an increase. For more information, consult the documentation for that service. For information about the default quotas for each service, see Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html) in the Amazon Web Services General Reference. + /// The maximum value that you plan to scale out to. When a scaling policy is + /// in effect, Application Auto Scaling can scale out (expand) as needed to the + /// maximum capacity limit in response to changing demand. This property is required + /// when registering a new scalable target. + /// + /// + /// Although you can specify a large maximum capacity, note that service quotas + /// may impose lower limits. Each service has its own default quotas for the + /// maximum capacity of the resource. If you want to specify a higher limit, + /// you can request an increase. For more information, consult the documentation + /// for that service. For information about the default quotas for each service, + /// see Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html) + /// in the Amazon Web Services General Reference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxCapacity")] pub max_capacity: Option, - /// The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a new scalable target. - /// For certain resources, the minimum value allowed is 0. This includes Lambda provisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR clusters, and custom resources. For all other resources, the minimum value allowed is 1. 
+ /// The minimum value that you plan to scale in to. When a scaling policy is + /// in effect, Application Auto Scaling can scale in (contract) as needed to + /// the minimum capacity limit in response to changing demand. This property + /// is required when registering a new scalable target. + /// + /// + /// For certain resources, the minimum value allowed is 0. This includes Lambda + /// provisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR + /// clusters, and custom resources. For all other resources, the minimum value + /// allowed is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "minCapacity")] pub min_capacity: Option, - /// The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. - /// * ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp. - /// * Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - /// * EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - /// * AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet. - /// * DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table. - /// * DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index. - /// * Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster. - /// * SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - /// * Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - /// * Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - /// * Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. - /// * Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1. - /// * Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable. - /// * Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. 
- /// * Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. - /// * Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. + /// The identifier of the resource that is associated with the scalable target. + /// This string consists of the resource type and unique identifier. + /// + /// + /// * ECS service - The resource type is service and the unique identifier + /// is the cluster name and service name. Example: service/default/sample-webapp. + /// + /// + /// * Spot Fleet - The resource type is spot-fleet-request and the unique + /// identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + /// + /// + /// * EMR cluster - The resource type is instancegroup and the unique identifier + /// is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. + /// + /// + /// * AppStream 2.0 fleet - The resource type is fleet and the unique identifier + /// is the fleet name. Example: fleet/sample-fleet. + /// + /// + /// * DynamoDB table - The resource type is table and the unique identifier + /// is the table name. Example: table/my-table. + /// + /// + /// * DynamoDB global secondary index - The resource type is index and the + /// unique identifier is the index name. Example: table/my-table/index/my-table-index. + /// + /// + /// * Aurora DB cluster - The resource type is cluster and the unique identifier + /// is the cluster name. Example: cluster:my-db-cluster. + /// + /// + /// * SageMaker endpoint variant - The resource type is variant and the unique + /// identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + /// + /// + /// * Custom resources are not supported with a resource type. This parameter + /// must specify the OutputValue from the CloudFormation template stack used + /// to access the resources. The unique identifier is defined by the service + /// provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). + /// + /// + /// * Amazon Comprehend document classification endpoint - The resource type + /// and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. + /// + /// + /// * Amazon Comprehend entity recognizer endpoint - The resource type and + /// unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + /// + /// + /// * Lambda provisioned concurrency - The resource type is function and the + /// unique identifier is the function name with a function version or alias + /// name suffix that is not $LATEST. Example: function:my-function:prod or + /// function:my-function:1. + /// + /// + /// * Amazon Keyspaces table - The resource type is table and the unique identifier + /// is the table name. Example: keyspace/mykeyspace/table/mytable. + /// + /// + /// * Amazon MSK cluster - The resource type and unique identifier are specified + /// using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + /// + /// + /// * Amazon ElastiCache replication group - The resource type is replication-group + /// and the unique identifier is the replication group name. 
Example: replication-group/mycluster. + /// + /// + /// * Neptune cluster - The resource type is cluster and the unique identifier + /// is the cluster name. Example: cluster:mycluster. #[serde(rename = "resourceID")] pub resource_id: String, - /// This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. - /// If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which it creates if it does not yet exist. For more information, see Application Auto Scaling IAM roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles). + /// This parameter is required for services that do not support service-linked + /// roles (such as Amazon EMR), and it must specify the ARN of an IAM role that + /// allows Application Auto Scaling to modify the scalable target on your behalf. + /// + /// + /// If the service supports service-linked roles, Application Auto Scaling uses + /// a service-linked role, which it creates if it does not yet exist. For more + /// information, see Application Auto Scaling IAM roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles). #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleARN")] pub role_arn: Option, - /// The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - /// * ecs:service:DesiredCount - The desired task count of an ECS service. - /// * elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. - /// * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - /// * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - /// * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - /// * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - /// * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. - /// * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. - /// * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - /// * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model endpoint variant. - /// * custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. - /// * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. - /// * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint. - /// * lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function. - /// * cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table. - /// * cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table. 
- /// * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster. - /// * elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group. - /// * elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group. - /// * neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. + /// The scalable dimension associated with the scalable target. This string consists + /// of the service namespace, resource type, and scaling property. + /// + /// + /// * ecs:service:DesiredCount - The desired task count of an ECS service. + /// + /// + /// * elasticmapreduce:instancegroup:InstanceCount - The instance count of + /// an EMR Instance Group. + /// + /// + /// * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot + /// Fleet. + /// + /// + /// * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream + /// 2.0 fleet. + /// + /// + /// * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for + /// a DynamoDB table. + /// + /// + /// * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + /// a DynamoDB table. + /// + /// + /// * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for + /// a DynamoDB global secondary index. + /// + /// + /// * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for + /// a DynamoDB global secondary index. + /// + /// + /// * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora + /// DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible + /// edition. + /// + /// + /// * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances + /// for an SageMaker model endpoint variant. + /// + /// + /// * custom-resource:ResourceType:Property - The scalable dimension for a + /// custom resource provided by your own application or service. + /// + /// + /// * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The + /// number of inference units for an Amazon Comprehend document classification + /// endpoint. + /// + /// + /// * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + /// of inference units for an Amazon Comprehend entity recognizer endpoint. + /// + /// + /// * lambda:function:ProvisionedConcurrency - The provisioned concurrency + /// for a Lambda function. + /// + /// + /// * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + /// an Amazon Keyspaces table. + /// + /// + /// * cassandra:table:WriteCapacityUnits - The provisioned write capacity + /// for an Amazon Keyspaces table. + /// + /// + /// * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + /// for brokers in an Amazon MSK cluster. + /// + /// + /// * elasticache:replication-group:NodeGroups - The number of node groups + /// for an Amazon ElastiCache replication group. + /// + /// + /// * elasticache:replication-group:Replicas - The number of replicas per + /// node group for an Amazon ElastiCache replication group. + /// + /// + /// * neptune:cluster:ReadReplicaCount - The count of read replicas in an + /// Amazon Neptune DB cluster. #[serde(rename = "scalableDimension")] pub scalable_dimension: String, - /// The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. 
+ /// The namespace of the Amazon Web Services service that provides the resource. + /// For a resource provided by your own application or service, use custom-resource + /// instead. #[serde(rename = "serviceNamespace")] pub service_namespace: String, - /// An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities. - /// Suspension Outcomes - /// * For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended. - /// * For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. - /// * For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended. - /// For more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) in the Application Auto Scaling User Guide. + /// An embedded object that contains attributes and attribute values that are + /// used to suspend and resume automatic scaling. Setting the value of an attribute + /// to true suspends the specified scaling activities. Setting it to false (default) + /// resumes the specified scaling activities. + /// + /// + /// Suspension Outcomes + /// + /// + /// * For DynamicScalingInSuspended, while a suspension is in effect, all + /// scale-in activities that are triggered by a scaling policy are suspended. + /// + /// + /// * For DynamicScalingOutSuspended, while a suspension is in effect, all + /// scale-out activities that are triggered by a scaling policy are suspended. + /// + /// + /// * For ScheduledScalingSuspended, while a suspension is in effect, all + /// scaling activities that involve scheduled actions are suspended. + /// + /// + /// For more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) + /// in the Application Auto Scaling User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "suspendedState")] pub suspended_state: Option, } -/// An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the specified scaling activities. -/// Suspension Outcomes -/// * For DynamicScalingInSuspended, while a suspension is in effect, all scale-in activities that are triggered by a scaling policy are suspended. -/// * For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. -/// * For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that involve scheduled actions are suspended. -/// For more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) in the Application Auto Scaling User Guide. +/// An embedded object that contains attributes and attribute values that are +/// used to suspend and resume automatic scaling. 
Setting the value of an attribute +/// to true suspends the specified scaling activities. Setting it to false (default) +/// resumes the specified scaling activities. +/// +/// +/// Suspension Outcomes +/// +/// +/// * For DynamicScalingInSuspended, while a suspension is in effect, all +/// scale-in activities that are triggered by a scaling policy are suspended. +/// +/// +/// * For DynamicScalingOutSuspended, while a suspension is in effect, all +/// scale-out activities that are triggered by a scaling policy are suspended. +/// +/// +/// * For ScheduledScalingSuspended, while a suspension is in effect, all +/// scaling activities that involve scheduled actions are suspended. +/// +/// +/// For more information, see Suspending and resuming scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) +/// in the Application Auto Scaling User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalableTargetSuspendedState { #[serde(default, skip_serializing_if = "Option::is_none", rename = "dynamicScalingInSuspended")] @@ -99,10 +277,15 @@ pub struct ScalableTargetSuspendedState { /// ScalableTargetStatus defines the observed state of ScalableTarget #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalableTargetStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The Unix timestamp for when the scalable target was created. @@ -113,20 +296,32 @@ pub struct ScalableTargetStatus { pub last_modified_time: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalableTargetStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalableTargetStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalingpolicies.rs b/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalingpolicies.rs index c1744c784..0bb37b951 100644 --- a/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalingpolicies.rs +++ b/kube-custom-resources-rs/src/applicationautoscaling_services_k8s_aws/v1alpha1/scalingpolicies.rs @@ -5,9 +5,15 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// ScalingPolicySpec defines the desired state of ScalingPolicy. -/// Represents a scaling policy to use with Application Auto Scaling. -/// For more information about configuring scaling policies for a specific service, see Getting started with Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/getting-started.html) in the Application Auto Scaling User Guide. +/// ScalingPolicySpec defines the desired state of ScalingPolicy. +/// +/// +/// Represents a scaling policy to use with Application Auto Scaling. +/// +/// +/// For more information about configuring scaling policies for a specific service, +/// see Getting started with Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/getting-started.html) +/// in the Application Auto Scaling User Guide. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "applicationautoscaling.services.k8s.aws", version = "v1alpha1", kind = "ScalingPolicy", plural = "scalingpolicies")] #[kube(namespaced)] @@ -17,70 +23,209 @@ pub struct ScalingPolicySpec { /// The name of the scaling policy. #[serde(rename = "policyName")] pub policy_name: String, - /// The policy type. This parameter is required if you are creating a scaling policy. 
- /// The following policy types are supported: - /// TargetTrackingScaling—Not supported for Amazon EMR - /// StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. - /// For more information, see Target tracking scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and Step scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) in the Application Auto Scaling User Guide. + /// The policy type. This parameter is required if you are creating a scaling + /// policy. + /// + /// + /// The following policy types are supported: + /// + /// + /// TargetTrackingScaling—Not supported for Amazon EMR + /// + /// + /// StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon + /// Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. + /// + /// + /// For more information, see Target tracking scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) + /// and Step scaling policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) + /// in the Application Auto Scaling User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "policyType")] pub policy_type: Option, - /// The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. - /// * ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: service/default/sample-webapp. - /// * Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - /// * EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - /// * AppStream 2.0 fleet - The resource type is fleet and the unique identifier is the fleet name. Example: fleet/sample-fleet. - /// * DynamoDB table - The resource type is table and the unique identifier is the table name. Example: table/my-table. - /// * DynamoDB global secondary index - The resource type is index and the unique identifier is the index name. Example: table/my-table/index/my-table-index. - /// * Aurora DB cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:my-db-cluster. - /// * SageMaker endpoint variant - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - /// * Custom resources are not supported with a resource type. This parameter must specify the OutputValue from the CloudFormation template stack used to access the resources. The unique identifier is defined by the service provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - /// * Amazon Comprehend document classification endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. 
- /// * Amazon Comprehend entity recognizer endpoint - The resource type and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. - /// * Lambda provisioned concurrency - The resource type is function and the unique identifier is the function name with a function version or alias name suffix that is not $LATEST. Example: function:my-function:prod or function:my-function:1. - /// * Amazon Keyspaces table - The resource type is table and the unique identifier is the table name. Example: keyspace/mykeyspace/table/mytable. - /// * Amazon MSK cluster - The resource type and unique identifier are specified using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. - /// * Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. - /// * Neptune cluster - The resource type is cluster and the unique identifier is the cluster name. Example: cluster:mycluster. + /// The identifier of the resource associated with the scaling policy. This string + /// consists of the resource type and unique identifier. + /// + /// + /// * ECS service - The resource type is service and the unique identifier + /// is the cluster name and service name. Example: service/default/sample-webapp. + /// + /// + /// * Spot Fleet - The resource type is spot-fleet-request and the unique + /// identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + /// + /// + /// * EMR cluster - The resource type is instancegroup and the unique identifier + /// is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. + /// + /// + /// * AppStream 2.0 fleet - The resource type is fleet and the unique identifier + /// is the fleet name. Example: fleet/sample-fleet. + /// + /// + /// * DynamoDB table - The resource type is table and the unique identifier + /// is the table name. Example: table/my-table. + /// + /// + /// * DynamoDB global secondary index - The resource type is index and the + /// unique identifier is the index name. Example: table/my-table/index/my-table-index. + /// + /// + /// * Aurora DB cluster - The resource type is cluster and the unique identifier + /// is the cluster name. Example: cluster:my-db-cluster. + /// + /// + /// * SageMaker endpoint variant - The resource type is variant and the unique + /// identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. + /// + /// + /// * Custom resources are not supported with a resource type. This parameter + /// must specify the OutputValue from the CloudFormation template stack used + /// to access the resources. The unique identifier is defined by the service + /// provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). + /// + /// + /// * Amazon Comprehend document classification endpoint - The resource type + /// and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. + /// + /// + /// * Amazon Comprehend entity recognizer endpoint - The resource type and + /// unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. 
+ /// + /// + /// * Lambda provisioned concurrency - The resource type is function and the + /// unique identifier is the function name with a function version or alias + /// name suffix that is not $LATEST. Example: function:my-function:prod or + /// function:my-function:1. + /// + /// + /// * Amazon Keyspaces table - The resource type is table and the unique identifier + /// is the table name. Example: keyspace/mykeyspace/table/mytable. + /// + /// + /// * Amazon MSK cluster - The resource type and unique identifier are specified + /// using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + /// + /// + /// * Amazon ElastiCache replication group - The resource type is replication-group + /// and the unique identifier is the replication group name. Example: replication-group/mycluster. + /// + /// + /// * Neptune cluster - The resource type is cluster and the unique identifier + /// is the cluster name. Example: cluster:mycluster. #[serde(rename = "resourceID")] pub resource_id: String, - /// The scalable dimension. This string consists of the service namespace, resource type, and scaling property. - /// * ecs:service:DesiredCount - The desired task count of an ECS service. - /// * elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. - /// * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - /// * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - /// * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - /// * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - /// * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. - /// * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. - /// * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - /// * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model endpoint variant. - /// * custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. - /// * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. - /// * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend entity recognizer endpoint. - /// * lambda:function:ProvisionedConcurrency - The provisioned concurrency for a Lambda function. - /// * cassandra:table:ReadCapacityUnits - The provisioned read capacity for an Amazon Keyspaces table. - /// * cassandra:table:WriteCapacityUnits - The provisioned write capacity for an Amazon Keyspaces table. - /// * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for brokers in an Amazon MSK cluster. - /// * elasticache:replication-group:NodeGroups - The number of node groups for an Amazon ElastiCache replication group. - /// * elasticache:replication-group:Replicas - The number of replicas per node group for an Amazon ElastiCache replication group. - /// * neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. + /// The scalable dimension. 
This string consists of the service namespace, resource + /// type, and scaling property. + /// + /// + /// * ecs:service:DesiredCount - The desired task count of an ECS service. + /// + /// + /// * elasticmapreduce:instancegroup:InstanceCount - The instance count of + /// an EMR Instance Group. + /// + /// + /// * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot + /// Fleet. + /// + /// + /// * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream + /// 2.0 fleet. + /// + /// + /// * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for + /// a DynamoDB table. + /// + /// + /// * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + /// a DynamoDB table. + /// + /// + /// * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for + /// a DynamoDB global secondary index. + /// + /// + /// * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for + /// a DynamoDB global secondary index. + /// + /// + /// * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora + /// DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible + /// edition. + /// + /// + /// * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances + /// for an SageMaker model endpoint variant. + /// + /// + /// * custom-resource:ResourceType:Property - The scalable dimension for a + /// custom resource provided by your own application or service. + /// + /// + /// * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The + /// number of inference units for an Amazon Comprehend document classification + /// endpoint. + /// + /// + /// * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + /// of inference units for an Amazon Comprehend entity recognizer endpoint. + /// + /// + /// * lambda:function:ProvisionedConcurrency - The provisioned concurrency + /// for a Lambda function. + /// + /// + /// * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + /// an Amazon Keyspaces table. + /// + /// + /// * cassandra:table:WriteCapacityUnits - The provisioned write capacity + /// for an Amazon Keyspaces table. + /// + /// + /// * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + /// for brokers in an Amazon MSK cluster. + /// + /// + /// * elasticache:replication-group:NodeGroups - The number of node groups + /// for an Amazon ElastiCache replication group. + /// + /// + /// * elasticache:replication-group:Replicas - The number of replicas per + /// node group for an Amazon ElastiCache replication group. + /// + /// + /// * neptune:cluster:ReadReplicaCount - The count of read replicas in an + /// Amazon Neptune DB cluster. #[serde(rename = "scalableDimension")] pub scalable_dimension: String, - /// The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. + /// The namespace of the Amazon Web Services service that provides the resource. + /// For a resource provided by your own application or service, use custom-resource + /// instead. #[serde(rename = "serviceNamespace")] pub service_namespace: String, - /// A step scaling policy. - /// This parameter is required if you are creating a policy and the policy type is StepScaling. + /// A step scaling policy. + /// + /// + /// This parameter is required if you are creating a policy and the policy type + /// is StepScaling. 
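As a concrete illustration of a StepScaling configuration, a sketch under the same module-path assumption as above; all literal values are examples, the `metricIntervalUpperBound` field and the numeric payload types (`f64`, `i64`) are assumed rather than shown in this hunk, and the bounds mirror the alarm-threshold discussion further down.

```rust
// Sketch: a StepScaling policy configuration with two step adjustments.
use kube_custom_resources_rs::applicationautoscaling_services_k8s_aws::v1alpha1::scalingpolicies::{
    ScalingPolicyStepScalingPolicyConfiguration,
    ScalingPolicyStepScalingPolicyConfigurationStepAdjustments,
};

fn step_scaling_example() -> ScalingPolicyStepScalingPolicyConfiguration {
    ScalingPolicyStepScalingPolicyConfiguration {
        adjustment_type: Some("ChangeInCapacity".to_string()),
        step_adjustments: Some(vec![
            // Metric up to 10 above the alarm threshold: add one unit of capacity.
            ScalingPolicyStepScalingPolicyConfigurationStepAdjustments {
                metric_interval_lower_bound: Some(0.0),
                metric_interval_upper_bound: Some(10.0),
                scaling_adjustment: Some(1),
                ..Default::default()
            },
            // Metric more than 10 above the threshold: add two units (null upper bound).
            ScalingPolicyStepScalingPolicyConfigurationStepAdjustments {
                metric_interval_lower_bound: Some(10.0),
                metric_interval_upper_bound: None,
                scaling_adjustment: Some(2),
                ..Default::default()
            },
        ]),
        ..Default::default()
    }
}

fn main() {
    let cfg = step_scaling_example();
    println!("{}", serde_json::to_string_pretty(&cfg).unwrap());
}
```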
#[serde(default, skip_serializing_if = "Option::is_none", rename = "stepScalingPolicyConfiguration")] pub step_scaling_policy_configuration: Option, - /// A target tracking scaling policy. Includes support for predefined or customized metrics. - /// This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling. + /// A target tracking scaling policy. Includes support for predefined or customized + /// metrics. + /// + /// + /// This parameter is required if you are creating a policy and the policy type + /// is TargetTrackingScaling. #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetTrackingScalingPolicyConfiguration")] pub target_tracking_scaling_policy_configuration: Option, } -/// A step scaling policy. -/// This parameter is required if you are creating a policy and the policy type is StepScaling. +/// A step scaling policy. +/// +/// +/// This parameter is required if you are creating a policy and the policy type +/// is StepScaling. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyStepScalingPolicyConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "adjustmentType")] @@ -95,15 +240,43 @@ pub struct ScalingPolicyStepScalingPolicyConfiguration { pub step_adjustments: Option>, } -/// Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html). Describes an adjustment based on the difference between the value of the aggregated CloudWatch metric and the breach threshold that you've defined for the alarm. -/// For the following examples, suppose that you have an alarm with a breach threshold of 50: -/// * To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10. -/// * To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0. -/// There are a few rules for the step adjustments for your step policy: -/// * The ranges of your step adjustments can't overlap or have a gap. -/// * At most one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound. -/// * At most one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound. -/// * The upper and lower bound can't be null in the same step adjustment. +/// Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html). +/// Describes an adjustment based on the difference between the value of the +/// aggregated CloudWatch metric and the breach threshold that you've defined +/// for the alarm. +/// +/// +/// For the following examples, suppose that you have an alarm with a breach +/// threshold of 50: +/// +/// +/// * To trigger the adjustment when the metric is greater than or equal to +/// 50 and less than 60, specify a lower bound of 0 and an upper bound of +/// 10. +/// +/// +/// * To trigger the adjustment when the metric is greater than 40 and less +/// than or equal to 50, specify a lower bound of -10 and an upper bound of +/// 0. 
+/// +/// +/// There are a few rules for the step adjustments for your step policy: +/// +/// +/// * The ranges of your step adjustments can't overlap or have a gap. +/// +/// +/// * At most one step adjustment can have a null lower bound. If one step +/// adjustment has a negative lower bound, then there must be a step adjustment +/// with a null lower bound. +/// +/// +/// * At most one step adjustment can have a null upper bound. If one step +/// adjustment has a positive upper bound, then there must be a step adjustment +/// with a null upper bound. +/// +/// +/// * The upper and lower bound can't be null in the same step adjustment. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyStepScalingPolicyConfigurationStepAdjustments { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricIntervalLowerBound")] @@ -114,22 +287,53 @@ pub struct ScalingPolicyStepScalingPolicyConfigurationStepAdjustments { pub scaling_adjustment: Option, } -/// A target tracking scaling policy. Includes support for predefined or customized metrics. -/// This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling. +/// A target tracking scaling policy. Includes support for predefined or customized +/// metrics. +/// +/// +/// This parameter is required if you are creating a policy and the policy type +/// is TargetTrackingScaling. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyTargetTrackingScalingPolicyConfiguration { - /// Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling. - /// For information about the available metrics for a service, see Amazon Web Services Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) in the Amazon CloudWatch User Guide. - /// To create your customized metric specification: - /// * Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) in the Amazon CloudWatch User Guide. - /// * Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. That is, the value of the metric should decrease when capacity increases, and increase when capacity decreases. - /// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). + /// Represents a CloudWatch metric of your choosing for a target tracking scaling + /// policy to use with Application Auto Scaling. + /// + /// + /// For information about the available metrics for a service, see Amazon Web + /// Services Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) + /// in the Amazon CloudWatch User Guide. + /// + /// + /// To create your customized metric specification: + /// + /// + /// * Add values for each required parameter from CloudWatch. You can use + /// an existing metric, or a new metric that you create. 
To use your own metric, + /// you must first publish the metric to CloudWatch. For more information, + /// see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) + /// in the Amazon CloudWatch User Guide. + /// + /// + /// * Choose a metric that changes proportionally with capacity. The value + /// of the metric should increase or decrease in inverse proportion to the + /// number of capacity units. That is, the value of the metric should decrease + /// when capacity increases, and increase when capacity decreases. + /// + /// + /// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "customizedMetricSpecification")] pub customized_metric_specification: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableScaleIn")] pub disable_scale_in: Option, - /// Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling. - /// Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) in the Application Auto Scaling User Guide. + /// Represents a predefined metric for a target tracking scaling policy to use + /// with Application Auto Scaling. + /// + /// + /// Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. + /// To determine whether a desired metric already exists by looking up its namespace + /// and dimension using the CloudWatch metrics dashboard in the console, follow + /// the procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) + /// in the Application Auto Scaling User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "predefinedMetricSpecification")] pub predefined_metric_specification: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "scaleInCooldown")] @@ -140,12 +344,32 @@ pub struct ScalingPolicyTargetTrackingScalingPolicyConfiguration { pub target_value: Option, } -/// Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Application Auto Scaling. -/// For information about the available metrics for a service, see Amazon Web Services Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) in the Amazon CloudWatch User Guide. -/// To create your customized metric specification: -/// * Add values for each required parameter from CloudWatch. You can use an existing metric, or a new metric that you create. To use your own metric, you must first publish the metric to CloudWatch. For more information, see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) in the Amazon CloudWatch User Guide. -/// * Choose a metric that changes proportionally with capacity. The value of the metric should increase or decrease in inverse proportion to the number of capacity units. 
That is, the value of the metric should decrease when capacity increases, and increase when capacity decreases. -/// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). +/// Represents a CloudWatch metric of your choosing for a target tracking scaling +/// policy to use with Application Auto Scaling. +/// +/// +/// For information about the available metrics for a service, see Amazon Web +/// Services Services That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) +/// in the Amazon CloudWatch User Guide. +/// +/// +/// To create your customized metric specification: +/// +/// +/// * Add values for each required parameter from CloudWatch. You can use +/// an existing metric, or a new metric that you create. To use your own metric, +/// you must first publish the metric to CloudWatch. For more information, +/// see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) +/// in the Amazon CloudWatch User Guide. +/// +/// +/// * Choose a metric that changes proportionally with capacity. The value +/// of the metric should increase or decrease in inverse proportion to the +/// number of capacity units. That is, the value of the metric should decrease +/// when capacity increases, and increase when capacity decreases. +/// +/// +/// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyTargetTrackingScalingPolicyConfigurationCustomizedMetricSpecification { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -169,8 +393,15 @@ pub struct ScalingPolicyTargetTrackingScalingPolicyConfigurationCustomizedMetric pub value: Option, } -/// Represents a predefined metric for a target tracking scaling policy to use with Application Auto Scaling. -/// Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. To determine whether a desired metric already exists by looking up its namespace and dimension using the CloudWatch metrics dashboard in the console, follow the procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) in the Application Auto Scaling User Guide. +/// Represents a predefined metric for a target tracking scaling policy to use +/// with Application Auto Scaling. +/// +/// +/// Only the Amazon Web Services that you're using send metrics to Amazon CloudWatch. +/// To determine whether a desired metric already exists by looking up its namespace +/// and dimension using the CloudWatch metrics dashboard in the console, follow +/// the procedure in Building dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) +/// in the Application Auto Scaling User Guide. 
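Tying the pieces together, a hedged sketch of a complete TargetTrackingScaling policy spec under the same module-path assumption as before; the predefined metric type, resource identifiers, and target value are examples, and the payload types behind `Option` are assumed.

```rust
use kube_custom_resources_rs::applicationautoscaling_services_k8s_aws::v1alpha1::scalingpolicies::{
    ScalingPolicy, ScalingPolicySpec,
    ScalingPolicyTargetTrackingScalingPolicyConfiguration,
    ScalingPolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecification,
};

fn main() -> Result<(), serde_json::Error> {
    let spec = ScalingPolicySpec {
        policy_name: "my-table-read-tracking".to_string(),
        policy_type: Some("TargetTrackingScaling".to_string()),
        resource_id: "table/my-table".to_string(),
        scalable_dimension: "dynamodb:table:ReadCapacityUnits".to_string(),
        service_namespace: "dynamodb".to_string(),
        target_tracking_scaling_policy_configuration: Some(
            ScalingPolicyTargetTrackingScalingPolicyConfiguration {
                // Track roughly 70% utilization of provisioned read capacity.
                target_value: Some(70.0),
                predefined_metric_specification: Some(
                    ScalingPolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecification {
                        predefined_metric_type: Some("DynamoDBReadCapacityUtilization".to_string()),
                        ..Default::default()
                    },
                ),
                ..Default::default()
            },
        ),
        ..ScalingPolicySpec::default()
    };

    // Constructor generated by kube's CustomResource derive.
    let policy = ScalingPolicy::new("my-table-read-tracking", spec);
    println!("{}", serde_json::to_string_pretty(&policy)?);
    Ok(())
}
```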
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "predefinedMetricType")] @@ -182,13 +413,18 @@ pub struct ScalingPolicyTargetTrackingScalingPolicyConfigurationPredefinedMetric /// ScalingPolicyStatus defines the observed state of ScalingPolicy #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// The CloudWatch alarms created for the target tracking scaling policy. #[serde(default, skip_serializing_if = "Option::is_none")] pub alarms: Option>, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The Unix timestamp for when the scaling policy was created. @@ -199,13 +435,23 @@ pub struct ScalingPolicyStatus { pub last_modified_time: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. @@ -221,7 +467,9 @@ pub struct ScalingPolicyStatusAlarms { pub alarm_name: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ScalingPolicyStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/argoproj_io/v1alpha1/argocds.rs b/kube-custom-resources-rs/src/argoproj_io/v1alpha1/argocds.rs index 05875adbe..04e2a023e 100644 --- a/kube-custom-resources-rs/src/argoproj_io/v1alpha1/argocds.rs +++ b/kube-custom-resources-rs/src/argoproj_io/v1alpha1/argocds.rs @@ -45,7 +45,7 @@ pub struct ArgoCDSpec { /// GATrackingID is the google analytics tracking ID to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gaTrackingID")] pub ga_tracking_id: Option, - /// Grafana defines the Grafana server options for ArgoCD. + /// Deprecated: Grafana defines the Grafana server options for ArgoCD. #[serde(default, skip_serializing_if = "Option::is_none")] pub grafana: Option, /// HA options for High Availability support for the Redis component. @@ -587,7 +587,7 @@ pub struct ArgoCDDexResourcesClaims { pub name: String, } -/// Grafana defines the Grafana server options for ArgoCD. +/// Deprecated: Grafana defines the Grafana server options for ArgoCD. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ArgoCDGrafana { /// Enabled will toggle Grafana support globally for ArgoCD. diff --git a/kube-custom-resources-rs/src/argoproj_io/v1beta1/argocds.rs b/kube-custom-resources-rs/src/argoproj_io/v1beta1/argocds.rs index 864ff588f..e35a4e92f 100644 --- a/kube-custom-resources-rs/src/argoproj_io/v1beta1/argocds.rs +++ b/kube-custom-resources-rs/src/argoproj_io/v1beta1/argocds.rs @@ -42,7 +42,7 @@ pub struct ArgoCDSpec { /// GATrackingID is the google analytics tracking ID to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gaTrackingID")] pub ga_tracking_id: Option, - /// Grafana defines the Grafana server options for ArgoCD. + /// Deprecated: Grafana defines the Grafana server options for ArgoCD. #[serde(default, skip_serializing_if = "Option::is_none")] pub grafana: Option, /// HA options for High Availability support for the Redis component. @@ -161,9 +161,15 @@ pub struct ArgoCDApplicationSet { /// Resources defines the Compute Resources required by the container for ApplicationSet. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, + /// SCMProviders defines the list of allowed custom SCM provider API URLs + #[serde(default, skip_serializing_if = "Option::is_none", rename = "scmProviders")] + pub scm_providers: Option>, /// SCMRootCAConfigMap is the name of the config map that stores the Gitlab SCM Provider's TLS certificate which will be mounted on the ApplicationSet Controller (optional). #[serde(default, skip_serializing_if = "Option::is_none", rename = "scmRootCAConfigMap")] pub scm_root_ca_config_map: Option, + /// SourceNamespaces defines the namespaces applicationset resources are allowed to be created in + #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceNamespaces")] + pub source_namespaces: Option>, /// Version is the Argo CD ApplicationSet image tag. (optional) #[serde(default, skip_serializing_if = "Option::is_none")] pub version: Option, @@ -544,7 +550,7 @@ pub struct ArgoCDControllerSharding { pub replicas: Option, } -/// Grafana defines the Grafana server options for ArgoCD. +/// Deprecated: Grafana defines the Grafana server options for ArgoCD. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ArgoCDGrafana { /// Enabled will toggle Grafana support globally for ArgoCD. diff --git a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephblockpools.rs b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephblockpools.rs index 963500cdf..165a51508 100644 --- a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephblockpools.rs +++ b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephblockpools.rs @@ -13,6 +13,9 @@ use std::collections::BTreeMap; #[kube(status = "CephBlockPoolStatus")] #[kube(schema = "disabled")] pub struct CephBlockPoolSpec { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, @@ -122,8 +125,8 @@ pub struct CephBlockPoolMirroringSnapshotSchedules { /// NamedBlockPoolSpec allows a block pool to be created with a non-default name. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum CephBlockPoolName { - #[serde(rename = "device_health_metrics")] - DeviceHealthMetrics, + #[serde(rename = ".rgw.root")] + RgwRoot, #[serde(rename = ".nfs")] Nfs, #[serde(rename = ".mgr")] diff --git a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephfilesystems.rs b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephfilesystems.rs index 9e4c771c7..62b73c209 100644 --- a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephfilesystems.rs +++ b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephfilesystems.rs @@ -40,6 +40,9 @@ pub struct CephFilesystemSpec { /// NamedPoolSpec represents the named ceph pool spec #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephFilesystemDataPools { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, @@ -216,6 +219,9 @@ pub struct CephFilesystemDataPoolsStatusCheckMirror { /// The metadata pool settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephFilesystemMetadataPool { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, diff --git a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectstores.rs b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectstores.rs index 5ada86649..c4f3525e6 100644 --- a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectstores.rs +++ b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectstores.rs @@ -43,6 +43,9 @@ pub struct CephObjectStoreSpec { /// The data pool settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephObjectStoreDataPool { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, @@ -1029,6 +1032,9 @@ pub struct CephObjectStoreHealthCheckStartupProbeProbeTcpSocket { /// The metadata pool settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephObjectStoreMetadataPool { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, diff --git a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectzones.rs b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectzones.rs index ca3fc5d33..7caefe336 100644 --- a/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectzones.rs +++ b/kube-custom-resources-rs/src/ceph_rook_io/v1/cephobjectzones.rs @@ -33,6 +33,9 @@ pub struct CephObjectZoneSpec { /// The data pool settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephObjectZoneDataPool { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, @@ -206,6 +209,9 @@ pub struct CephObjectZoneDataPoolStatusCheckMirror { /// The metadata pool settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct CephObjectZoneMetadataPool { + /// The application name to set on the pool. Only expected to be set for rgw pools. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub application: Option, /// DEPRECATED: use Parameters instead, e.g. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionMode")] pub compression_mode: Option, diff --git a/kube-custom-resources-rs/src/cert_manager_io/v1/clusterissuers.rs b/kube-custom-resources-rs/src/cert_manager_io/v1/clusterissuers.rs index 2c7962288..7afc80e67 100644 --- a/kube-custom-resources-rs/src/cert_manager_io/v1/clusterissuers.rs +++ b/kube-custom-resources-rs/src/cert_manager_io/v1/clusterissuers.rs @@ -1159,9 +1159,6 @@ pub struct ClusterIssuerVaultAuthAppRoleSecretRef { /// Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterIssuerVaultAuthKubernetes { - /// TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub audiences: Option>, /// The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPath")] pub mount_path: Option, @@ -1188,6 +1185,9 @@ pub struct ClusterIssuerVaultAuthKubernetesSecretRef { /// A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterIssuerVaultAuthKubernetesServiceAccountRef { + /// TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub audiences: Option>, /// Name of the ServiceAccount used to request a token. pub name: String, } diff --git a/kube-custom-resources-rs/src/cert_manager_io/v1/issuers.rs b/kube-custom-resources-rs/src/cert_manager_io/v1/issuers.rs index 35e9327ce..7e0ce9f2d 100644 --- a/kube-custom-resources-rs/src/cert_manager_io/v1/issuers.rs +++ b/kube-custom-resources-rs/src/cert_manager_io/v1/issuers.rs @@ -1160,9 +1160,6 @@ pub struct IssuerVaultAuthAppRoleSecretRef { /// Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct IssuerVaultAuthKubernetes { - /// TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub audiences: Option>, /// The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPath")] pub mount_path: Option, @@ -1189,6 +1186,9 @@ pub struct IssuerVaultAuthKubernetesSecretRef { /// A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. To use this field, you must configure an RBAC rule to let cert-manager request a token. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct IssuerVaultAuthKubernetesServiceAccountRef { + /// TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token consisting of the issuer's namespace and name is always included. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub audiences: Option>, /// Name of the ServiceAccount used to request a token. pub name: String, } diff --git a/kube-custom-resources-rs/src/cilium_io/v2/ciliumendpoints.rs b/kube-custom-resources-rs/src/cilium_io/v2/ciliumendpoints.rs index 48894201f..2734ddc6d 100644 --- a/kube-custom-resources-rs/src/cilium_io/v2/ciliumendpoints.rs +++ b/kube-custom-resources-rs/src/cilium_io/v2/ciliumendpoints.rs @@ -2,7 +2,7 @@ // kopium command: kopium --docs --filename=./crd-catalog/cilium/cilium/cilium.io/v2/ciliumendpoints.yaml --derive=Default --derive=PartialEq // kopium version: 0.16.5 -use kube::CustomResource; + use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; diff --git a/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumbgppeeringpolicies.rs b/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumbgppeeringpolicies.rs index 20f8d23a4..a91eaea2d 100644 --- a/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumbgppeeringpolicies.rs +++ b/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumbgppeeringpolicies.rs @@ -137,9 +137,12 @@ pub struct CiliumBGPPeeringPolicyVirtualRoutersNeighborsAdvertisedPathAttributes /// Large holds a list of the BGP Large Communities Attribute (RFC 8092) values. #[serde(default, skip_serializing_if = "Option::is_none")] pub large: Option>, - /// Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values. + /// Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as numeric values. #[serde(default, skip_serializing_if = "Option::is_none")] pub standard: Option>, + /// WellKnown holds a list "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as well-known string aliases to their numeric values. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "wellKnown")] + pub well_known: Option>, } /// Selector selects a group of objects of the SelectorType resulting into routes that will be announced with the configured Attributes. If nil / not set, all objects of the SelectorType are selected. 
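The cert-manager hunks above move the optional `audiences` list off the Kubernetes auth block and onto the service-account reference. A minimal sketch (not part of the generated patch) of what that looks like from the consuming side, assuming the crate is used as `kube_custom_resources_rs` with modules mirroring the file layout shown here, and that the generated field is `Option<Vec<String>>` (concrete generics are elided in this snapshot):

use kube_custom_resources_rs::cert_manager_io::v1::clusterissuers::ClusterIssuerVaultAuthKubernetesServiceAccountRef;

fn main() -> Result<(), serde_json::Error> {
    // Hypothetical values: the extra token audiences now live on the
    // serviceAccountRef itself rather than on the auth block.
    let sa_ref = ClusterIssuerVaultAuthKubernetesServiceAccountRef {
        audiences: Some(vec!["vault".to_string()]),
        name: "cert-manager-vault".to_string(),
    };
    // `skip_serializing_if = "Option::is_none"` keeps the key out of the
    // output when no audiences are set, so existing manifests round-trip
    // unchanged.
    println!("{}", serde_json::to_string_pretty(&sa_ref)?);
    Ok(())
}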
diff --git a/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumendpointslices.rs b/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumendpointslices.rs index 7d365538d..bb8d30fcb 100644 --- a/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumendpointslices.rs +++ b/kube-custom-resources-rs/src/cilium_io/v2alpha1/ciliumendpointslices.rs @@ -2,7 +2,7 @@ // kopium command: kopium --docs --filename=./crd-catalog/cilium/cilium/cilium.io/v2alpha1/ciliumendpointslices.yaml --derive=Default --derive=PartialEq // kopium version: 0.16.5 -use kube::CustomResource; + use serde::{Serialize, Deserialize}; /// CoreCiliumEndpoint is slim version of status of CiliumEndpoint. diff --git a/kube-custom-resources-rs/src/claudie_io/v1beta1/inputmanifests.rs b/kube-custom-resources-rs/src/claudie_io/v1beta1/inputmanifests.rs index 56143e4e2..f0b8387eb 100644 --- a/kube-custom-resources-rs/src/claudie_io/v1beta1/inputmanifests.rs +++ b/kube-custom-resources-rs/src/claudie_io/v1beta1/inputmanifests.rs @@ -171,9 +171,9 @@ pub struct InputManifestNodePoolsDynamic { /// Type of the machines in the nodepool. Currently, only AMD64 machines are supported. #[serde(rename = "serverType")] pub server_type: String, - /// Size of the storage disk on the nodes in the nodepool in GB. The OS disk is created automatically with predefined size of 100GB for kubernetes nodes and 50GB for Loadbalancer nodes. + /// Size of the storage disk on the nodes in the nodepool in GB. The OS disk is created automatically with predefined size of 100GB for kubernetes nodes and 50GB for Loadbalancer nodes. The value must be either -1 (no disk is created), or >= 50. If no value is specified, 50 is used. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageDiskSize")] - pub storage_disk_size: Option, + pub storage_disk_size: Option, /// User defined taints for this nodepool. #[serde(default, skip_serializing_if = "Option::is_none")] pub taints: Option>, @@ -207,7 +207,8 @@ pub struct InputManifestNodePoolsDynamicProviderSpec { /// Region of the nodepool. pub region: String, /// Zone of the nodepool. - pub zone: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub zone: Option, } /// The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. @@ -304,6 +305,8 @@ pub enum InputManifestProvidersProviderType { Cloudflare, #[serde(rename = "hetznerdns")] Hetznerdns, + #[serde(rename = "genesiscloud")] + Genesiscloud, } /// SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/mod.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/mod.rs index c464a3de5..fbf74dd15 100644 --- a/kube-custom-resources-rs/src/cluster_x_k8s_io/mod.rs +++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/mod.rs @@ -1,2 +1,3 @@ +pub mod v1alpha3; pub mod v1alpha4; pub mod v1beta1; diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/clusters.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/clusters.rs new file mode 100644 index 000000000..0cc85cff0 --- /dev/null +++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/clusters.rs @@ -0,0 +1,231 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/clusters.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// ClusterSpec defines the desired state of Cluster. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "Cluster", plural = "clusters")] +#[kube(namespaced)] +#[kube(status = "ClusterStatus")] +#[kube(schema = "disabled")] +pub struct ClusterSpec { + /// Cluster network configuration. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterNetwork")] + pub cluster_network: Option, + /// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneEndpoint")] + pub control_plane_endpoint: Option, + /// ControlPlaneRef is an optional reference to a provider-specific resource that holds + /// the details for provisioning the Control Plane for a Cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneRef")] + pub control_plane_ref: Option, + /// InfrastructureRef is a reference to a provider-specific resource that holds the details + /// for provisioning infrastructure for a cluster in said provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureRef")] + pub infrastructure_ref: Option, + /// Paused can be used to prevent controllers from processing the Cluster and all its associated objects. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub paused: Option, +} + +/// Cluster network configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterClusterNetwork { + /// APIServerPort specifies the port the API Server should bind to. + /// Defaults to 6443. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiServerPort")] + pub api_server_port: Option, + /// The network ranges from which Pod networks are allocated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pods: Option, + /// Domain name for services. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceDomain")] + pub service_domain: Option, + /// The network ranges from which service VIPs are allocated. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub services: Option, +} + +/// The network ranges from which Pod networks are allocated. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterClusterNetworkPods { + #[serde(rename = "cidrBlocks")] + pub cidr_blocks: Vec, +} + +/// The network ranges from which service VIPs are allocated. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterClusterNetworkServices { + #[serde(rename = "cidrBlocks")] + pub cidr_blocks: Vec, +} + +/// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterControlPlaneEndpoint { + /// The hostname on which the API server is serving. + pub host: String, + /// The port on which the API server is serving. + pub port: i32, +} + +/// ControlPlaneRef is an optional reference to a provider-specific resource that holds +/// the details for provisioning the Control Plane for a Cluster. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterControlPlaneRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// InfrastructureRef is a reference to a provider-specific resource that holds the details +/// for provisioning infrastructure for a cluster in said provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterInfrastructureRef { + /// API version of the referent. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// ClusterStatus defines the observed state of Cluster. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterStatus { + /// Conditions defines current service state of the cluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// ControlPlaneInitialized defines if the control plane has been initialized. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneInitialized")] + pub control_plane_initialized: Option, + /// ControlPlaneReady defines if the control plane is ready. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneReady")] + pub control_plane_ready: Option, + /// FailureDomains is a slice of failure domain objects synced from the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option>, + /// FailureMessage indicates that there is a fatal problem reconciling the + /// state, and will be set to a descriptive error message. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason indicates that there is a fatal problem reconciling the + /// state, and will be set to a token value suitable for + /// programmatic interpretation. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// InfrastructureReady is the state of the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureReady")] + pub infrastructure_ready: Option, + /// ObservedGeneration is the latest generation observed by the controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// Phase represents the current phase of cluster actuation. + /// E.g. Pending, Running, Terminating, Failed etc. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub phase: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterStatusConditions { + /// Last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when + /// the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. + /// This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. + /// The specific API may choose whether or not this field is considered a guaranteed API. + /// This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately + /// understand the current situation and act accordingly. + /// The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + /// can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// FailureDomains is a slice of failure domain objects synced from the infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct ClusterStatusFailureDomains { + /// Attributes is a free form map of attributes an infrastructure provider might use or require. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub attributes: Option>, + /// ControlPlane determines if this failure domain is suitable for use by control plane machines. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlane")] + pub control_plane: Option, +} + diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinedeployments.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinedeployments.rs new file mode 100644 index 000000000..2f0f60cf4 --- /dev/null +++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinedeployments.rs @@ -0,0 +1,439 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinedeployments.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; +use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; + +/// MachineDeploymentSpec defines the desired state of MachineDeployment. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "MachineDeployment", plural = "machinedeployments")] +#[kube(namespaced)] +#[kube(status = "MachineDeploymentStatus")] +#[kube(schema = "disabled")] +pub struct MachineDeploymentSpec { + /// ClusterName is the name of the Cluster this object belongs to. + #[serde(rename = "clusterName")] + pub cluster_name: String, + /// Minimum number of seconds for which a newly created machine should + /// be ready. + /// Defaults to 0 (machine will be considered available as soon as it + /// is ready) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReadySeconds")] + pub min_ready_seconds: Option, + /// Indicates that the deployment is paused. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub paused: Option, + /// The maximum time in seconds for a deployment to make progress before it + /// is considered to be failed. The deployment controller will continue to + /// process failed deployments and a condition with a ProgressDeadlineExceeded + /// reason will be surfaced in the deployment status. Note that progress will + /// not be estimated during the time a deployment is paused. Defaults to 600s. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "progressDeadlineSeconds")] + pub progress_deadline_seconds: Option, + /// Number of desired machines. Defaults to 1. + /// This is a pointer to distinguish between explicit zero and not specified. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// The number of old MachineSets to retain to allow rollback. + /// This is a pointer to distinguish between explicit zero and not specified. + /// Defaults to 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "revisionHistoryLimit")] + pub revision_history_limit: Option, + /// Label selector for machines. Existing MachineSets whose machines are + /// selected by this will be the ones affected by this deployment. + /// It must match the machine template's labels. + pub selector: MachineDeploymentSelector, + /// The deployment strategy to use to replace existing machines with + /// new ones. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub strategy: Option, + /// Template describes the machines that will be created. + pub template: MachineDeploymentTemplate, +} + +/// Label selector for machines. 
Existing MachineSets whose machines are +/// selected by this will be the ones affected by this deployment. +/// It must match the machine template's labels. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// The deployment strategy to use to replace existing machines with +/// new ones. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentStrategy { + /// Rolling update config params. Present only if + /// MachineDeploymentStrategyType = RollingUpdate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rollingUpdate")] + pub rolling_update: Option, + /// Type of deployment. Currently the only supported strategy is + /// "RollingUpdate". + /// Default is RollingUpdate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// Rolling update config params. Present only if +/// MachineDeploymentStrategyType = RollingUpdate. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentStrategyRollingUpdate { + /// The maximum number of machines that can be scheduled above the + /// desired number of machines. + /// Value can be an absolute number (ex: 5) or a percentage of + /// desired machines (ex: 10%). + /// This can not be 0 if MaxUnavailable is 0. + /// Absolute number is calculated from percentage by rounding up. + /// Defaults to 1. + /// Example: when this is set to 30%, the new MachineSet can be scaled + /// up immediately when the rolling update starts, such that the total + /// number of old and new machines do not exceed 130% of desired + /// machines. Once old machines have been killed, new MachineSet can + /// be scaled up further, ensuring that total number of machines running + /// at any time during the update is at most 130% of desired machines. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxSurge")] + pub max_surge: Option, + /// The maximum number of machines that can be unavailable during the update. 
+ /// Value can be an absolute number (ex: 5) or a percentage of desired + /// machines (ex: 10%). + /// Absolute number is calculated from percentage by rounding down. + /// This can not be 0 if MaxSurge is 0. + /// Defaults to 0. + /// Example: when this is set to 30%, the old MachineSet can be scaled + /// down to 70% of desired machines immediately when the rolling update + /// starts. Once new machines are ready, old MachineSet can be scaled + /// down further, followed by scaling up the new MachineSet, ensuring + /// that the total number of machines available at all times + /// during the update is at least 70% of desired machines. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxUnavailable")] + pub max_unavailable: Option, +} + +/// Template describes the machines that will be created. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplate { + /// Standard object's metadata. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Specification of the desired behavior of the machine. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub spec: Option, +} + +/// Standard object's metadata. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be + /// set by external tools to store and retrieve arbitrary metadata. They are not + /// queryable and should be preserved when modifying objects. + /// More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique + /// name ONLY IF the Name field has not been provided. + /// If this field is used, the name returned to the client will be different + /// than the name passed. This value will also be combined with a unique suffix. + /// The provided value has the same validation rules as the Name field, + /// and may be truncated by the length of the suffix required to make the value + /// unique on the server. + /// + /// + /// If this field is specified and the generated name exists, the server will + /// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + /// ServerTimeout indicating a unique name could not be found in the time allotted, and the client + /// should retry (optionally after the time indicated in the Retry-After header). + /// + /// + /// Applied only if Name is not specified. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize + /// (scope and select) objects. May match selectors of replication controllers + /// and services. 
+ /// More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although + /// some resources may allow a client to request the generation of an appropriate name + /// automatically. Name is primarily intended for creation idempotence and configuration + /// definition. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/identifiers#names + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is + /// equivalent to the "default" namespace, but "default" is the canonical representation. + /// Not all objects are required to be scoped to a namespace - the value of this field for + /// those objects will be empty. + /// + /// + /// Must be a DNS_LABEL. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/namespaces + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have + /// been deleted, this object will be garbage collected. If this object is managed by a controller, + /// then an entry in this list will point to this controller, with the controller field set to true. + /// There cannot be more than one managing controller. + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning +/// object. An owning object must be in the same namespace as the dependent, or +/// be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then + /// the owner cannot be deleted from the key-value store until this + /// reference is removed. + /// See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + /// for how the garbage collector interacts with this field and enforces the foreground deletion. + /// Defaults to false. + /// To set this field, a user needs "delete" permission of the owner, + /// otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. 
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// Specification of the desired behavior of the machine. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateSpec { + /// Bootstrap is a reference to a local struct which encapsulates + /// fields to configure the Machine’s bootstrapping mechanism. + pub bootstrap: MachineDeploymentTemplateSpecBootstrap, + /// ClusterName is the name of the Cluster this object belongs to. + #[serde(rename = "clusterName")] + pub cluster_name: String, + /// FailureDomain is the failure domain the machine will be created in. + /// Must match a key in the FailureDomains map stored on the cluster object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// InfrastructureRef is a required reference to a custom resource + /// offered by an infrastructure provider. + #[serde(rename = "infrastructureRef")] + pub infrastructure_ref: MachineDeploymentTemplateSpecInfrastructureRef, + /// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + /// The default value is 0, meaning that the node can be drained without any time limitations. + /// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDrainTimeout")] + pub node_drain_timeout: Option, + /// ProviderID is the identification ID of the machine provided by the provider. + /// This field must match the provider ID as seen on the node object corresponding to this machine. + /// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + /// with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + /// machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + /// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + /// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + /// and then a comparison is done to find out unregistered machines and are marked for delete. + /// This field will be set by the actuators and consumed by higher level entities like autoscaler that will + /// be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// Version defines the desired Kubernetes version. + /// This field is meant to be optionally used by bootstrap providers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// Bootstrap is a reference to a local struct which encapsulates +/// fields to configure the Machine’s bootstrapping mechanism. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateSpecBootstrap { + /// ConfigRef is a reference to a bootstrap provider-specific resource + /// that holds configuration details. The reference is optional to + /// allow users/operators to specify Bootstrap.Data without + /// the need of a controller. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "configRef")] + pub config_ref: Option, + /// Data contains the bootstrap data, such as cloud-init details scripts. + /// If nil, the Machine should remain in the Pending state. + /// + /// + /// Deprecated: Switch to DataSecretName. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub data: Option, + /// DataSecretName is the name of the secret that stores the bootstrap data script. + /// If nil, the Machine should remain in the Pending state. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSecretName")] + pub data_secret_name: Option, +} + +/// ConfigRef is a reference to a bootstrap provider-specific resource +/// that holds configuration details. The reference is optional to +/// allow users/operators to specify Bootstrap.Data without +/// the need of a controller. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateSpecBootstrapConfigRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// InfrastructureRef is a required reference to a custom resource +/// offered by an infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentTemplateSpecInfrastructureRef { + /// API version of the referent. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// MachineDeploymentStatus defines the observed state of MachineDeployment. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineDeploymentStatus { + /// Total number of available machines (ready for at least minReadySeconds) + /// targeted by this deployment. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableReplicas")] + pub available_replicas: Option, + /// The generation observed by the deployment controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// Phase represents the current phase of a MachineDeployment (ScalingUp, ScalingDown, Running, Failed, or Unknown). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub phase: Option, + /// Total number of ready machines targeted by this deployment. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// Total number of non-terminated machines targeted by this deployment + /// (their labels match the selector). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// Selector is the same as the label selector but in the string format to avoid introspection + /// by clients. The string will be in the same format as the query-param syntax. 
+    /// More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub selector: Option<String>,
+    /// Total number of unavailable machines targeted by this deployment.
+    /// This is the total number of machines that are still required for
+    /// the deployment to have 100% available capacity. They may either
+    /// be machines that are running but not yet available or machines
+    /// that still have not been created.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "unavailableReplicas")]
+    pub unavailable_replicas: Option<i32>,
+    /// Total number of non-terminated machines targeted by this deployment
+    /// that have the desired template spec.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "updatedReplicas")]
+    pub updated_replicas: Option<i32>,
+}
+
diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinehealthchecks.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinehealthchecks.rs
new file mode 100644
index 000000000..87df8a91e
--- /dev/null
+++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinehealthchecks.rs
@@ -0,0 +1,185 @@
+// WARNING: generated by kopium - manual changes will be overwritten
+// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinehealthchecks.yaml --derive=Default --derive=PartialEq
+// kopium version: 0.16.5
+
+use kube::CustomResource;
+use serde::{Serialize, Deserialize};
+use std::collections::BTreeMap;
+use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
+
+/// Specification of machine health check policy
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "MachineHealthCheck", plural = "machinehealthchecks")]
+#[kube(namespaced)]
+#[kube(status = "MachineHealthCheckStatus")]
+#[kube(schema = "disabled")]
+pub struct MachineHealthCheckSpec {
+    /// ClusterName is the name of the Cluster this object belongs to.
+    #[serde(rename = "clusterName")]
+    pub cluster_name: String,
+    /// Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by
+    /// "selector" are not healthy.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxUnhealthy")]
+    pub max_unhealthy: Option<IntOrString>,
+    /// Machines older than this duration without a node will be considered to have
+    /// failed and will be remediated.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeStartupTimeout")]
+    pub node_startup_timeout: Option<String>,
+    /// RemediationTemplate is a reference to a remediation template
+    /// provided by an infrastructure provider.
+    ///
+    ///
+    /// This field is completely optional, when filled, the MachineHealthCheck controller
+    /// creates a new object from the template referenced and hands off remediation of the machine to
+    /// a controller that lives outside of Cluster API.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "remediationTemplate")]
+    pub remediation_template: Option<MachineHealthCheckRemediationTemplate>,
+    /// Label selector to match machines whose health will be exercised
+    pub selector: MachineHealthCheckSelector,
+    /// UnhealthyConditions contains a list of the conditions that determine
+    /// whether a node is considered unhealthy. The conditions are combined in a
+    /// logical OR, i.e. if any of the conditions is met, the node is unhealthy.
+    #[serde(rename = "unhealthyConditions")]
+    pub unhealthy_conditions: Vec<MachineHealthCheckUnhealthyConditions>,
+}
+
+/// RemediationTemplate is a reference to a remediation template
+/// provided by an infrastructure provider.
+///
+///
+/// This field is completely optional, when filled, the MachineHealthCheck controller
+/// creates a new object from the template referenced and hands off remediation of the machine to
+/// a controller that lives outside of Cluster API.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckRemediationTemplate {
+    /// API version of the referent.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")]
+    pub api_version: Option<String>,
+    /// If referring to a piece of an object instead of an entire object, this string
+    /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+    /// For example, if the object reference is to a container within a pod, this would take on a value like:
+    /// "spec.containers{name}" (where "name" refers to the name of the container that triggered
+    /// the event) or if no container name is specified "spec.containers[2]" (container with
+    /// index 2 in this pod). This syntax is chosen only to have some well-defined way of
+    /// referencing a part of an object.
+    /// TODO: this design is not final and this field is subject to change in the future.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")]
+    pub field_path: Option<String>,
+    /// Kind of the referent.
+    /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub kind: Option<String>,
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    /// Namespace of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub namespace: Option<String>,
+    /// Specific resourceVersion to which this reference is made, if any.
+    /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")]
+    pub resource_version: Option<String>,
+    /// UID of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub uid: Option<String>,
+}
+
+/// Label selector to match machines whose health will be exercised
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckSelector {
+    /// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")]
+    pub match_expressions: Option<Vec<MachineHealthCheckSelectorMatchExpressions>>,
+    /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+    /// map is equivalent to an element of matchExpressions, whose key field is "key", the
+    /// operator is "In", and the values array contains only "value". The requirements are ANDed.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")]
+    pub match_labels: Option<BTreeMap<String, String>>,
+}
+
+/// A label selector requirement is a selector that contains values, a key, and an operator that
+/// relates the key and values.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckSelectorMatchExpressions {
+    /// key is the label key that the selector applies to.
+    pub key: String,
+    /// operator represents a key's relationship to a set of values.
+    /// Valid operators are In, NotIn, Exists and DoesNotExist.
+    pub operator: String,
+    /// values is an array of string values. If the operator is In or NotIn,
+    /// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+    /// the values array must be empty. This array is replaced during a strategic
+    /// merge patch.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub values: Option<Vec<String>>,
+}
+
+/// UnhealthyCondition represents a Node condition type and value with a timeout
+/// specified as a duration. When the named condition has been in the given
+/// status for at least the timeout value, a node is considered unhealthy.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckUnhealthyConditions {
+    pub status: String,
+    pub timeout: String,
+    #[serde(rename = "type")]
+    pub r#type: String,
+}
+
+/// Most recently observed status of MachineHealthCheck resource
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckStatus {
+    /// Conditions defines current service state of the MachineHealthCheck.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub conditions: Option<Vec<MachineHealthCheckStatusConditions>>,
+    /// total number of healthy machines counted by this machine health check
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentHealthy")]
+    pub current_healthy: Option<i32>,
+    /// total number of machines counted by this machine health check
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "expectedMachines")]
+    pub expected_machines: Option<i32>,
+    /// ObservedGeneration is the latest generation observed by the controller.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")]
+    pub observed_generation: Option<i64>,
+    /// RemediationsAllowed is the number of further remediations allowed by this machine health check before
+    /// maxUnhealthy short circuiting will be applied
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "remediationsAllowed")]
+    pub remediations_allowed: Option<i32>,
+    /// Targets shows the current list of machines the machine health check is watching
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub targets: Option<Vec<String>>,
+}
+
+/// Condition defines an observation of a Cluster API resource operational state.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct MachineHealthCheckStatusConditions {
+    /// Last time the condition transitioned from one status to another.
+    /// This should be when the underlying condition changed. If that is not known, then using the time when
+    /// the API field changed is acceptable.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")]
+    pub last_transition_time: Option<String>,
+    /// A human readable message indicating details about the transition.
+    /// This field may be empty.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub message: Option<String>,
+    /// The reason for the condition's last transition in CamelCase.
+    /// The specific API may choose whether or not this field is considered a guaranteed API.
+    /// This field may not be empty.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub reason: Option<String>,
+    /// Severity provides an explicit classification of Reason code, so the users or machines can immediately
+    /// understand the current situation and act accordingly.
+    /// The Severity field MUST be set only when Status=False.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub severity: Option<String>,
+    /// Status of the condition, one of True, False, Unknown.
+    pub status: String,
+    /// Type of condition in CamelCase or in foo.example.com/CamelCase.
+    /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+    /// can be useful (see .node.status.conditions), the ability to deconflict is important.
+    #[serde(rename = "type")]
+    pub r#type: String,
+}
+
diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinepools.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinepools.rs
new file mode 100644
index 000000000..2d168d3c3
--- /dev/null
+++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinepools.rs
@@ -0,0 +1,490 @@
+// WARNING: generated by kopium - manual changes will be overwritten
+// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinepools.yaml --derive=Default --derive=PartialEq
+// kopium version: 0.16.5
+
+use kube::CustomResource;
+use serde::{Serialize, Deserialize};
+use std::collections::BTreeMap;
+use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
+
+/// MachinePoolSpec defines the desired state of MachinePool.
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "MachinePool", plural = "machinepools")]
+#[kube(namespaced)]
+#[kube(status = "MachinePoolStatus")]
+#[kube(schema = "disabled")]
+pub struct MachinePoolSpec {
+    /// ClusterName is the name of the Cluster this object belongs to.
+    #[serde(rename = "clusterName")]
+    pub cluster_name: String,
+    /// FailureDomains is the list of failure domains this MachinePool should be attached to.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")]
+    pub failure_domains: Option<Vec<String>>,
+    /// Minimum number of seconds for which a newly created machine instances should
+    /// be ready.
+    /// Defaults to 0 (machine instance will be considered available as soon as it
+    /// is ready)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReadySeconds")]
+    pub min_ready_seconds: Option<i32>,
+    /// ProviderIDList are the identification IDs of machine instances provided by the provider.
+    /// This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerIDList")]
+    pub provider_id_list: Option<Vec<String>>,
+    /// Number of desired machines. Defaults to 1.
+    /// This is a pointer to distinguish between explicit zero and not specified.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub replicas: Option<i32>,
+    /// The deployment strategy to use to replace existing machine instances with
+    /// new ones.
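+    ///
+    /// An illustrative value (editorial sketch, not part of the kopium output; it assumes the
+    /// stripped generics resolve to the `MachinePoolStrategy` and `MachinePoolStrategyRollingUpdate`
+    /// structs defined below, with plain `Option<String>`/`Option<IntOrString>` fields): a rolling
+    /// update that allows one surge machine and no unavailable machines could be expressed as
+    ///
+    /// ```ignore
+    /// use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;
+    ///
+    /// // Illustrative only: surge by one machine, never drop below the desired count.
+    /// let strategy = MachinePoolStrategy {
+    ///     r#type: Some("RollingUpdate".to_string()),
+    ///     rolling_update: Some(MachinePoolStrategyRollingUpdate {
+    ///         max_surge: Some(IntOrString::Int(1)),
+    ///         max_unavailable: Some(IntOrString::Int(0)),
+    ///     }),
+    /// };
+    /// ```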
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub strategy: Option, + /// Template describes the machines that will be created. + pub template: MachinePoolTemplate, +} + +/// The deployment strategy to use to replace existing machine instances with +/// new ones. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolStrategy { + /// Rolling update config params. Present only if + /// MachineDeploymentStrategyType = RollingUpdate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "rollingUpdate")] + pub rolling_update: Option, + /// Type of deployment. Currently the only supported strategy is + /// "RollingUpdate". + /// Default is RollingUpdate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] + pub r#type: Option, +} + +/// Rolling update config params. Present only if +/// MachineDeploymentStrategyType = RollingUpdate. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolStrategyRollingUpdate { + /// The maximum number of machines that can be scheduled above the + /// desired number of machines. + /// Value can be an absolute number (ex: 5) or a percentage of + /// desired machines (ex: 10%). + /// This can not be 0 if MaxUnavailable is 0. + /// Absolute number is calculated from percentage by rounding up. + /// Defaults to 1. + /// Example: when this is set to 30%, the new MachineSet can be scaled + /// up immediately when the rolling update starts, such that the total + /// number of old and new machines do not exceed 130% of desired + /// machines. Once old machines have been killed, new MachineSet can + /// be scaled up further, ensuring that total number of machines running + /// at any time during the update is at most 130% of desired machines. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxSurge")] + pub max_surge: Option, + /// The maximum number of machines that can be unavailable during the update. + /// Value can be an absolute number (ex: 5) or a percentage of desired + /// machines (ex: 10%). + /// Absolute number is calculated from percentage by rounding down. + /// This can not be 0 if MaxSurge is 0. + /// Defaults to 0. + /// Example: when this is set to 30%, the old MachineSet can be scaled + /// down to 70% of desired machines immediately when the rolling update + /// starts. Once new machines are ready, old MachineSet can be scaled + /// down further, followed by scaling up the new MachineSet, ensuring + /// that the total number of machines available at all times + /// during the update is at least 70% of desired machines. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxUnavailable")] + pub max_unavailable: Option, +} + +/// Template describes the machines that will be created. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplate { + /// Standard object's metadata. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Specification of the desired behavior of the machine. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub spec: Option, +} + +/// Standard object's metadata. 
+/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be + /// set by external tools to store and retrieve arbitrary metadata. They are not + /// queryable and should be preserved when modifying objects. + /// More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique + /// name ONLY IF the Name field has not been provided. + /// If this field is used, the name returned to the client will be different + /// than the name passed. This value will also be combined with a unique suffix. + /// The provided value has the same validation rules as the Name field, + /// and may be truncated by the length of the suffix required to make the value + /// unique on the server. + /// + /// + /// If this field is specified and the generated name exists, the server will + /// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + /// ServerTimeout indicating a unique name could not be found in the time allotted, and the client + /// should retry (optionally after the time indicated in the Retry-After header). + /// + /// + /// Applied only if Name is not specified. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize + /// (scope and select) objects. May match selectors of replication controllers + /// and services. + /// More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although + /// some resources may allow a client to request the generation of an appropriate name + /// automatically. Name is primarily intended for creation idempotence and configuration + /// definition. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/identifiers#names + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is + /// equivalent to the "default" namespace, but "default" is the canonical representation. + /// Not all objects are required to be scoped to a namespace - the value of this field for + /// those objects will be empty. + /// + /// + /// Must be a DNS_LABEL. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/namespaces + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have + /// been deleted, this object will be garbage collected. 
If this object is managed by a controller, + /// then an entry in this list will point to this controller, with the controller field set to true. + /// There cannot be more than one managing controller. + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning +/// object. An owning object must be in the same namespace as the dependent, or +/// be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then + /// the owner cannot be deleted from the key-value store until this + /// reference is removed. + /// See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + /// for how the garbage collector interacts with this field and enforces the foreground deletion. + /// Defaults to false. + /// To set this field, a user needs "delete" permission of the owner, + /// otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// Specification of the desired behavior of the machine. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateSpec { + /// Bootstrap is a reference to a local struct which encapsulates + /// fields to configure the Machine’s bootstrapping mechanism. + pub bootstrap: MachinePoolTemplateSpecBootstrap, + /// ClusterName is the name of the Cluster this object belongs to. + #[serde(rename = "clusterName")] + pub cluster_name: String, + /// FailureDomain is the failure domain the machine will be created in. + /// Must match a key in the FailureDomains map stored on the cluster object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// InfrastructureRef is a required reference to a custom resource + /// offered by an infrastructure provider. + #[serde(rename = "infrastructureRef")] + pub infrastructure_ref: MachinePoolTemplateSpecInfrastructureRef, + /// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + /// The default value is 0, meaning that the node can be drained without any time limitations. 
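+    /// Illustrative value (editorial sketch, not part of the kopium output; the duration format
+    /// shown is an assumption based on the upstream API): on the wire the field carries a
+    /// Kubernetes-style duration string, e.g.
+    ///
+    /// ```ignore
+    /// // Illustrative wire representation only.
+    /// let fragment = serde_json::json!({ "nodeDrainTimeout": "10m" });
+    /// ```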
+ /// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDrainTimeout")] + pub node_drain_timeout: Option, + /// ProviderID is the identification ID of the machine provided by the provider. + /// This field must match the provider ID as seen on the node object corresponding to this machine. + /// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + /// with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + /// machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + /// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + /// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + /// and then a comparison is done to find out unregistered machines and are marked for delete. + /// This field will be set by the actuators and consumed by higher level entities like autoscaler that will + /// be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// Version defines the desired Kubernetes version. + /// This field is meant to be optionally used by bootstrap providers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// Bootstrap is a reference to a local struct which encapsulates +/// fields to configure the Machine’s bootstrapping mechanism. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateSpecBootstrap { + /// ConfigRef is a reference to a bootstrap provider-specific resource + /// that holds configuration details. The reference is optional to + /// allow users/operators to specify Bootstrap.Data without + /// the need of a controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "configRef")] + pub config_ref: Option, + /// Data contains the bootstrap data, such as cloud-init details scripts. + /// If nil, the Machine should remain in the Pending state. + /// + /// + /// Deprecated: Switch to DataSecretName. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub data: Option, + /// DataSecretName is the name of the secret that stores the bootstrap data script. + /// If nil, the Machine should remain in the Pending state. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSecretName")] + pub data_secret_name: Option, +} + +/// ConfigRef is a reference to a bootstrap provider-specific resource +/// that holds configuration details. The reference is optional to +/// allow users/operators to specify Bootstrap.Data without +/// the need of a controller. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateSpecBootstrapConfigRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// InfrastructureRef is a required reference to a custom resource +/// offered by an infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolTemplateSpecInfrastructureRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. 
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// MachinePoolStatus defines the observed state of MachinePool. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolStatus { + /// The number of available replicas (ready for at least minReadySeconds) for this MachinePool. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableReplicas")] + pub available_replicas: Option, + /// BootstrapReady is the state of the bootstrap provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bootstrapReady")] + pub bootstrap_ready: Option, + /// Conditions define the current service state of the MachinePool. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage indicates that there is a problem reconciling the state, + /// and will be set to a descriptive error message. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason indicates that there is a problem reconciling the state, and + /// will be set to a token value suitable for programmatic interpretation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// InfrastructureReady is the state of the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureReady")] + pub infrastructure_ready: Option, + /// NodeRefs will point to the corresponding Nodes if it they exist. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeRefs")] + pub node_refs: Option>, + /// ObservedGeneration is the latest generation observed by the controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// Phase represents the current phase of cluster actuation. + /// E.g. Pending, Running, Terminating, Failed etc. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub phase: Option, + /// The number of ready replicas for this MachinePool. A machine is considered ready when the node has been created and is "Ready". + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readyReplicas")] + pub ready_replicas: Option, + /// Replicas is the most recently observed number of replicas. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// Total number of unavailable machine instances targeted by this machine pool. + /// This is the total number of machine instances that are still required for + /// the machine pool to have 100% available capacity. They may either + /// be machine instances that are running but not yet available or machine instances + /// that still have not been created. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "unavailableReplicas")] + pub unavailable_replicas: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolStatusConditions { + /// Last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when + /// the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. + /// This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. + /// The specific API may choose whether or not this field is considered a guaranteed API. + /// This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately + /// understand the current situation and act accordingly. + /// The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + /// can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// ObjectReference contains enough information to let you inspect or modify the referred object. +/// --- +/// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +/// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. +/// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +/// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +/// Those cannot be well described when embedded. +/// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +/// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +/// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +/// and the version of the actual struct is irrelevant. +/// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type +/// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +/// +/// +/// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +/// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachinePoolStatusNodeRefs { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machines.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machines.rs new file mode 100644 index 000000000..4a2223c41 --- /dev/null +++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machines.rs @@ -0,0 +1,310 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machines.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// MachineSpec defines the desired state of Machine. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "Machine", plural = "machines")] +#[kube(namespaced)] +#[kube(status = "MachineStatus")] +#[kube(schema = "disabled")] +pub struct MachineSpec { + /// Bootstrap is a reference to a local struct which encapsulates + /// fields to configure the Machine’s bootstrapping mechanism. + pub bootstrap: MachineBootstrap, + /// ClusterName is the name of the Cluster this object belongs to. 
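+    ///
+    /// For orientation (editorial sketch, not part of the kopium output; the namespace, object
+    /// name, and client setup are assumptions): the generated kind is used like any other kube-rs
+    /// custom resource, e.g.
+    ///
+    /// ```ignore
+    /// use kube::{Api, Client, api::PostParams};
+    ///
+    /// // Illustrative helper: create a Machine in the "default" namespace.
+    /// async fn create_machine(client: Client, spec: MachineSpec) -> Result<Machine, kube::Error> {
+    ///     let machines: Api<Machine> = Api::namespaced(client, "default");
+    ///     machines.create(&PostParams::default(), &Machine::new("machine-sample", spec)).await
+    /// }
+    /// ```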
+ #[serde(rename = "clusterName")] + pub cluster_name: String, + /// FailureDomain is the failure domain the machine will be created in. + /// Must match a key in the FailureDomains map stored on the cluster object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// InfrastructureRef is a required reference to a custom resource + /// offered by an infrastructure provider. + #[serde(rename = "infrastructureRef")] + pub infrastructure_ref: MachineInfrastructureRef, + /// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + /// The default value is 0, meaning that the node can be drained without any time limitations. + /// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDrainTimeout")] + pub node_drain_timeout: Option, + /// ProviderID is the identification ID of the machine provided by the provider. + /// This field must match the provider ID as seen on the node object corresponding to this machine. + /// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + /// with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + /// machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + /// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + /// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + /// and then a comparison is done to find out unregistered machines and are marked for delete. + /// This field will be set by the actuators and consumed by higher level entities like autoscaler that will + /// be interfacing with cluster-api as generic provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// Version defines the desired Kubernetes version. + /// This field is meant to be optionally used by bootstrap providers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// Bootstrap is a reference to a local struct which encapsulates +/// fields to configure the Machine’s bootstrapping mechanism. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineBootstrap { + /// ConfigRef is a reference to a bootstrap provider-specific resource + /// that holds configuration details. The reference is optional to + /// allow users/operators to specify Bootstrap.Data without + /// the need of a controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "configRef")] + pub config_ref: Option, + /// Data contains the bootstrap data, such as cloud-init details scripts. + /// If nil, the Machine should remain in the Pending state. + /// + /// + /// Deprecated: Switch to DataSecretName. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub data: Option, + /// DataSecretName is the name of the secret that stores the bootstrap data script. + /// If nil, the Machine should remain in the Pending state. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSecretName")] + pub data_secret_name: Option, +} + +/// ConfigRef is a reference to a bootstrap provider-specific resource +/// that holds configuration details. 
The reference is optional to +/// allow users/operators to specify Bootstrap.Data without +/// the need of a controller. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineBootstrapConfigRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// InfrastructureRef is a required reference to a custom resource +/// offered by an infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineInfrastructureRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// MachineStatus defines the observed state of Machine. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatus { + /// Addresses is a list of addresses assigned to the machine. + /// This field is copied from the infrastructure provider reference. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// BootstrapReady is the state of the bootstrap provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bootstrapReady")] + pub bootstrap_ready: Option, + /// Conditions defines current service state of the Machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage will be set in the event that there is a terminal problem + /// reconciling the Machine and will contain a more verbose string suitable + /// for logging and human consumption. + /// + /// + /// This field should not be set for transitive errors that a controller + /// faces that are expected to be fixed automatically over + /// time (like service outages), but instead indicate that something is + /// fundamentally wrong with the Machine's spec or the configuration of + /// the controller, and that manual intervention is required. Examples + /// of terminal errors would be invalid combinations of settings in the + /// spec, values that are unsupported by the controller, or the + /// responsible controller itself being critically misconfigured. + /// + /// + /// Any transient errors that occur during the reconciliation of Machines + /// can be added as events to the Machine object and/or logged in the + /// controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason will be set in the event that there is a terminal problem + /// reconciling the Machine and will contain a succinct value suitable + /// for machine interpretation. 
+ /// + /// + /// This field should not be set for transitive errors that a controller + /// faces that are expected to be fixed automatically over + /// time (like service outages), but instead indicate that something is + /// fundamentally wrong with the Machine's spec or the configuration of + /// the controller, and that manual intervention is required. Examples + /// of terminal errors would be invalid combinations of settings in the + /// spec, values that are unsupported by the controller, or the + /// responsible controller itself being critically misconfigured. + /// + /// + /// Any transient errors that occur during the reconciliation of Machines + /// can be added as events to the Machine object and/or logged in the + /// controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// InfrastructureReady is the state of the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "infrastructureReady")] + pub infrastructure_ready: Option, + /// LastUpdated identifies when the phase of the Machine last transitioned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastUpdated")] + pub last_updated: Option, + /// NodeRef will point to the corresponding Node if it exists. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeRef")] + pub node_ref: Option, + /// ObservedGeneration is the latest generation observed by the controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] + pub observed_generation: Option, + /// Phase represents the current phase of machine actuation. + /// E.g. Pending, Running, Terminating, Failed etc. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub phase: Option, + /// Version specifies the current version of Kubernetes running + /// on the corresponding Node. This is meant to be a means of bubbling + /// up status from the Node to the Machine. + /// It is entirely optional, but useful for end-user UX if it’s present. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// MachineAddress contains information for the node's address. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusAddresses { + /// The machine address. + pub address: String, + /// Machine address type, one of Hostname, ExternalIP or InternalIP. + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusConditions { + /// Last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when + /// the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. + /// This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. + /// The specific API may choose whether or not this field is considered a guaranteed API. + /// This field may not be empty. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately + /// understand the current situation and act accordingly. + /// The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + /// can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// NodeRef will point to the corresponding Node if it exists. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineStatusNodeRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. 
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinesets.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinesets.rs new file mode 100644 index 000000000..1ee601bf4 --- /dev/null +++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/machinesets.rs @@ -0,0 +1,398 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api/cluster.x-k8s.io/v1alpha3/machinesets.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// MachineSetSpec defines the desired state of MachineSet. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "cluster.x-k8s.io", version = "v1alpha3", kind = "MachineSet", plural = "machinesets")] +#[kube(namespaced)] +#[kube(status = "MachineSetStatus")] +#[kube(schema = "disabled")] +pub struct MachineSetSpec { + /// ClusterName is the name of the Cluster this object belongs to. + #[serde(rename = "clusterName")] + pub cluster_name: String, + /// DeletePolicy defines the policy used to identify nodes to delete when downscaling. + /// Defaults to "Random". Valid values are "Random, "Newest", "Oldest" + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletePolicy")] + pub delete_policy: Option, + /// MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + /// Defaults to 0 (machine will be considered available as soon as it is ready) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "minReadySeconds")] + pub min_ready_seconds: Option, + /// Replicas is the number of desired replicas. + /// This is a pointer to distinguish between explicit zero and unspecified. + /// Defaults to 1. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub replicas: Option, + /// Selector is a label query over machines that should match the replica count. + /// Label keys and values that must match in order to be controlled by this MachineSet. + /// It must match the machine template's labels. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + pub selector: MachineSetSelector, + /// Template is the object that describes the machine that will be created if + /// insufficient replicas are detected. + /// Object references to custom resources are treated as templates. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub template: Option, +} + +/// MachineSetSpec defines the desired state of MachineSet. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum MachineSetDeletePolicy { + Random, + Newest, + Oldest, +} + +/// Selector is a label query over machines that should match the replica count. +/// Label keys and values that must match in order to be controlled by this MachineSet. +/// It must match the machine template's labels. +/// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. 
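+    ///
+    /// Illustrative requirement (editorial sketch, not part of the kopium output; the label key and
+    /// values are made up, and it assumes the stripped generics resolve to
+    /// `Option<Vec<MachineSetSelectorMatchExpressions>>`): select machines whose deployment label
+    /// matches one of the listed values, e.g.
+    ///
+    /// ```ignore
+    /// // Illustrative only: one requirement, ANDed with any others in the list.
+    /// let requirement = MachineSetSelectorMatchExpressions {
+    ///     key: "cluster.x-k8s.io/deployment-name".to_string(),
+    ///     operator: "In".to_string(),
+    ///     values: Some(vec!["md-0".to_string()]),
+    /// };
+    /// ```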
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +/// Template is the object that describes the machine that will be created if +/// insufficient replicas are detected. +/// Object references to custom resources are treated as templates. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplate { + /// Standard object's metadata. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Specification of the desired behavior of the machine. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + #[serde(default, skip_serializing_if = "Option::is_none")] + pub spec: Option, +} + +/// Standard object's metadata. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be + /// set by external tools to store and retrieve arbitrary metadata. They are not + /// queryable and should be preserved when modifying objects. + /// More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique + /// name ONLY IF the Name field has not been provided. + /// If this field is used, the name returned to the client will be different + /// than the name passed. This value will also be combined with a unique suffix. + /// The provided value has the same validation rules as the Name field, + /// and may be truncated by the length of the suffix required to make the value + /// unique on the server. 
+ /// + /// + /// If this field is specified and the generated name exists, the server will + /// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + /// ServerTimeout indicating a unique name could not be found in the time allotted, and the client + /// should retry (optionally after the time indicated in the Retry-After header). + /// + /// + /// Applied only if Name is not specified. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize + /// (scope and select) objects. May match selectors of replication controllers + /// and services. + /// More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although + /// some resources may allow a client to request the generation of an appropriate name + /// automatically. Name is primarily intended for creation idempotence and configuration + /// definition. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/identifiers#names + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is + /// equivalent to the "default" namespace, but "default" is the canonical representation. + /// Not all objects are required to be scoped to a namespace - the value of this field for + /// those objects will be empty. + /// + /// + /// Must be a DNS_LABEL. + /// Cannot be updated. + /// More info: http://kubernetes.io/docs/user-guide/namespaces + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have + /// been deleted, this object will be garbage collected. If this object is managed by a controller, + /// then an entry in this list will point to this controller, with the controller field set to true. + /// There cannot be more than one managing controller. + /// + /// + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning +/// object. An owning object must be in the same namespace as the dependent, or +/// be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then + /// the owner cannot be deleted from the key-value store until this + /// reference is removed. 
+ /// See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + /// for how the garbage collector interacts with this field and enforces the foreground deletion. + /// Defaults to false. + /// To set this field, a user needs "delete" permission of the owner, + /// otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// Specification of the desired behavior of the machine. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpec { + /// Bootstrap is a reference to a local struct which encapsulates + /// fields to configure the Machine’s bootstrapping mechanism. + pub bootstrap: MachineSetTemplateSpecBootstrap, + /// ClusterName is the name of the Cluster this object belongs to. + #[serde(rename = "clusterName")] + pub cluster_name: String, + /// FailureDomain is the failure domain the machine will be created in. + /// Must match a key in the FailureDomains map stored on the cluster object. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// InfrastructureRef is a required reference to a custom resource + /// offered by an infrastructure provider. + #[serde(rename = "infrastructureRef")] + pub infrastructure_ref: MachineSetTemplateSpecInfrastructureRef, + /// NodeDrainTimeout is the total amount of time that the controller will spend on draining a node. + /// The default value is 0, meaning that the node can be drained without any time limitations. + /// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout` + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDrainTimeout")] + pub node_drain_timeout: Option, + /// ProviderID is the identification ID of the machine provided by the provider. + /// This field must match the provider ID as seen on the node object corresponding to this machine. + /// This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + /// with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + /// machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + /// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + /// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + /// and then a comparison is done to find out unregistered machines and are marked for delete. + /// This field will be set by the actuators and consumed by higher level entities like autoscaler that will + /// be interfacing with cluster-api as generic provider. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// Version defines the desired Kubernetes version. + /// This field is meant to be optionally used by bootstrap providers. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +/// Bootstrap is a reference to a local struct which encapsulates +/// fields to configure the Machine’s bootstrapping mechanism. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecBootstrap { + /// ConfigRef is a reference to a bootstrap provider-specific resource + /// that holds configuration details. The reference is optional to + /// allow users/operators to specify Bootstrap.Data without + /// the need of a controller. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "configRef")] + pub config_ref: Option, + /// Data contains the bootstrap data, such as cloud-init details scripts. + /// If nil, the Machine should remain in the Pending state. + /// + /// + /// Deprecated: Switch to DataSecretName. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub data: Option, + /// DataSecretName is the name of the secret that stores the bootstrap data script. + /// If nil, the Machine should remain in the Pending state. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSecretName")] + pub data_secret_name: Option, +} + +/// ConfigRef is a reference to a bootstrap provider-specific resource +/// that holds configuration details. The reference is optional to +/// allow users/operators to specify Bootstrap.Data without +/// the need of a controller. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecBootstrapConfigRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. 
+ /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// InfrastructureRef is a required reference to a custom resource +/// offered by an infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetTemplateSpecInfrastructureRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string + /// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + /// For example, if the object reference is to a container within a pod, this would take on a value like: + /// "spec.containers{name}" (where "name" refers to the name of the container that triggered + /// the event) or if no container name is specified "spec.containers[2]" (container with + /// index 2 in this pod). This syntax is chosen only to have some well-defined way of + /// referencing a part of an object. + /// TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// MachineSetStatus defines the observed state of MachineSet. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct MachineSetStatus { + /// The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableReplicas")] + pub available_replicas: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// In the event that there is a terminal problem reconciling the + /// replicas, both FailureReason and FailureMessage will be set. 
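For orientation, a minimal, illustrative sketch of how the generated MachineSet bindings above are typically consumed downstream. It assumes the published kube, tokio, and anyhow crates, a reachable cluster, and that the crate feature gating the cluster_x_k8s_io module is enabled; the namespace and output format are arbitrary and not part of this patch.

use kube::api::ListParams;
use kube::{Api, Client};
use kube_custom_resources_rs::cluster_x_k8s_io::v1alpha3::machinesets::MachineSet;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Connect using the local kubeconfig or in-cluster configuration.
    let client = Client::try_default().await?;

    // MachineSet is generated with #[kube(namespaced)], so scope the API to a namespace.
    let machinesets: Api<MachineSet> = Api::namespaced(client, "default");

    // List MachineSets and report the replica counts surfaced in MachineSetStatus.
    for ms in machinesets.list(&ListParams::default()).await? {
        let name = ms.metadata.name.as_deref().unwrap_or("<unnamed>");
        let desired = ms.spec.replicas.unwrap_or(1);
        let ready = ms.status.as_ref().and_then(|s| s.ready_replicas).unwrap_or(0);
        println!("{name}: {ready}/{desired} ready");
    }
    Ok(())
}
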
diff --git a/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/mod.rs b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/mod.rs
new file mode 100644
index 000000000..40dd605cd
--- /dev/null
+++ b/kube-custom-resources-rs/src/cluster_x_k8s_io/v1alpha3/mod.rs
@@ -0,0 +1,6 @@
+pub mod clusters;
+pub mod machinedeployments;
+pub mod machinehealthchecks;
+pub mod machinepools;
+pub mod machines;
+pub mod machinesets;
diff --git a/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs b/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs
index 2a9a93446..0b3059218 100644
--- a/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs
+++ b/kube-custom-resources-rs/src/config_openshift_io/v1/infrastructures.rs
@@ -368,6 +368,10 @@ pub struct InfrastructurePlatformSpecVsphereFailureDomainsTopology {
     /// resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. The maximum length of the path is 2048 characters.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] pub resource_pool: Option, + /// template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters. + /// When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub template: Option, } /// nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. diff --git a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/backups.rs b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/backups.rs index a95a9e705..d58dfa886 100644 --- a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/backups.rs +++ b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/backups.rs @@ -23,46 +23,74 @@ pub struct BackupSpec { /// BackupStatus defines the observed state of Backup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BackupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// Time at which the backup was created. This is the request time of the backup. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupCreationDateTime")] pub backup_creation_date_time: Option, - /// Time at which the automatic on-demand backup created by DynamoDB will expire. This SYSTEM on-demand backup expires automatically 35 days after its creation. + /// Time at which the automatic on-demand backup created by DynamoDB will expire. + /// This SYSTEM on-demand backup expires automatically 35 days after its creation. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupExpiryDateTime")] pub backup_expiry_date_time: Option, - /// Size of the backup in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value. + /// Size of the backup in bytes. DynamoDB updates this value approximately every + /// six hours. Recent changes might not be reflected in this value. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupSizeBytes")] pub backup_size_bytes: Option, /// Backup can be in one of the following states: CREATING, ACTIVE, DELETED. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupStatus")] pub backup_status: Option, - /// BackupType: - /// * USER - You create and manage these using the on-demand backup feature. 
- /// * SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. - /// * AWS_BACKUP - On-demand backup created by you from Backup service. + /// BackupType: + /// + /// + /// * USER - You create and manage these using the on-demand backup feature. + /// + /// + /// * SYSTEM - If you delete a table with point-in-time recovery enabled, + /// a SYSTEM backup is automatically created and is retained for 35 days (at + /// no additional cost). System backups allow you to restore the deleted table + /// to the state it was in just before the point of deletion. + /// + /// + /// * AWS_BACKUP - On-demand backup created by you from Backup service. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupType")] pub backup_type: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BackupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BackupStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/globaltables.rs b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/globaltables.rs index 9e6cca5ca..57528508a 100644 --- a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/globaltables.rs +++ b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/globaltables.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// GlobalTableSpec defines the desired state of GlobalTable. -/// Represents the properties of a global table. +/// GlobalTableSpec defines the desired state of GlobalTable. +/// +/// +/// Represents the properties of a global table. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "dynamodb.services.k8s.aws", version = "v1alpha1", kind = "GlobalTable", plural = "globaltables")] #[kube(namespaced)] @@ -31,38 +33,63 @@ pub struct GlobalTableReplicationGroup { /// GlobalTableStatus defines the observed state of GlobalTable #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalTableStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The creation time of the global table. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationDateTime")] pub creation_date_time: Option, - /// The current state of the global table: - /// * CREATING - The global table is being created. - /// * UPDATING - The global table is being updated. - /// * DELETING - The global table is being deleted. - /// * ACTIVE - The global table is ready for use. + /// The current state of the global table: + /// + /// + /// * CREATING - The global table is being created. + /// + /// + /// * UPDATING - The global table is being updated. + /// + /// + /// * DELETING - The global table is being deleted. + /// + /// + /// * ACTIVE - The global table is ready for use. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "globalTableStatus")] pub global_table_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalTableStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalTableStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/tables.rs b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/tables.rs index 40afa27cd..65f3a1fcf 100644 --- a/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/tables.rs +++ b/kube-custom-resources-rs/src/dynamodb_services_k8s_aws/v1alpha1/tables.rs @@ -15,53 +15,158 @@ pub struct TableSpec { /// An array of attributes that describe the key schema for the table and indexes. #[serde(rename = "attributeDefinitions")] pub attribute_definitions: Vec, - /// Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later. - /// * PROVISIONED - We recommend using PROVISIONED for predictable workloads. 
PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). - /// * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). + /// Controls how you are charged for read and write throughput and how you manage + /// capacity. This setting can be changed later. + /// + /// + /// * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + /// PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). + /// + /// + /// * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + /// workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). #[serde(default, skip_serializing_if = "Option::is_none", rename = "billingMode")] pub billing_mode: Option, /// Represents the settings used to enable point in time recovery. #[serde(default, skip_serializing_if = "Option::is_none", rename = "continuousBackups")] pub continuous_backups: Option, - /// Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. + /// Indicates whether deletion protection is to be enabled (true) or disabled + /// (false) on the table. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletionProtectionEnabled")] pub deletion_protection_enabled: Option, - /// One or more global secondary indexes (the maximum is 20) to be created on the table. Each global secondary index in the array includes the following: - /// * IndexName - The name of the global secondary index. Must be unique only for this table. - /// * KeySchema - Specifies the key schema for the global secondary index. - /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of: ProjectionType - One of the following: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes. ALL - All of the table attributes are projected into the index. NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. - /// * ProvisionedThroughput - The provisioned throughput settings for the global secondary index, consisting of read and write capacity units. + /// One or more global secondary indexes (the maximum is 20) to be created on + /// the table. Each global secondary index in the array includes the following: + /// + /// + /// * IndexName - The name of the global secondary index. Must be unique only + /// for this table. 
+ /// + /// + /// * KeySchema - Specifies the key schema for the global secondary index. + /// + /// + /// * Projection - Specifies attributes that are copied (projected) from the + /// table into the index. These are in addition to the primary key attributes + /// and index key attributes, which are automatically projected. Each attribute + /// specification is composed of: ProjectionType - One of the following: KEYS_ONLY + /// - Only the index and primary keys are projected into the index. INCLUDE + /// - Only the specified table attributes are projected into the index. The + /// list of projected attributes is in NonKeyAttributes. ALL - All of the + /// table attributes are projected into the index. NonKeyAttributes - A list + /// of one or more non-key attribute names that are projected into the secondary + /// index. The total count of attributes provided in NonKeyAttributes, summed + /// across all of the secondary indexes, must not exceed 100. If you project + /// the same attribute into two different indexes, this counts as two distinct + /// attributes when determining the total. + /// + /// + /// * ProvisionedThroughput - The provisioned throughput settings for the + /// global secondary index, consisting of read and write capacity units. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalSecondaryIndexes")] pub global_secondary_indexes: Option>, - /// Specifies the attributes that make up the primary key for a table or an index. The attributes in KeySchema must also be defined in the AttributeDefinitions array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) in the Amazon DynamoDB Developer Guide. - /// Each KeySchemaElement in the array is composed of: - /// * AttributeName - The name of this key attribute. - /// * KeyType - The role that the key attribute will assume: HASH - partition key RANGE - sort key - /// The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from the DynamoDB usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values. - /// The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value. - /// For a simple primary key (partition key), you must provide exactly one element with a KeyType of HASH. - /// For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order: The first element must have a KeyType of HASH, and the second element must have a KeyType of RANGE. - /// For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) in the Amazon DynamoDB Developer Guide. + /// Specifies the attributes that make up the primary key for a table or an index. + /// The attributes in KeySchema must also be defined in the AttributeDefinitions + /// array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + /// in the Amazon DynamoDB Developer Guide. + /// + /// + /// Each KeySchemaElement in the array is composed of: + /// + /// + /// * AttributeName - The name of this key attribute. 
+ /// + /// + /// * KeyType - The role that the key attribute will assume: HASH - partition + /// key RANGE - sort key + /// + /// + /// The partition key of an item is also known as its hash attribute. The term + /// "hash attribute" derives from the DynamoDB usage of an internal hash function + /// to evenly distribute data items across partitions, based on their partition + /// key values. + /// + /// + /// The sort key of an item is also known as its range attribute. The term "range + /// attribute" derives from the way DynamoDB stores items with the same partition + /// key physically close together, in sorted order by the sort key value. + /// + /// + /// For a simple primary key (partition key), you must provide exactly one element + /// with a KeyType of HASH. + /// + /// + /// For a composite primary key (partition key and sort key), you must provide + /// exactly two elements, in this order: The first element must have a KeyType + /// of HASH, and the second element must have a KeyType of RANGE. + /// + /// + /// For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + /// in the Amazon DynamoDB Developer Guide. #[serde(rename = "keySchema")] pub key_schema: Vec, - /// One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained. - /// Each local secondary index in the array includes the following: - /// * IndexName - The name of the local secondary index. Must be unique only for this table. - /// * KeySchema - Specifies the key schema for the local secondary index. The key schema must begin with the same partition key as the table. - /// * Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. Each attribute specification is composed of: ProjectionType - One of the following: KEYS_ONLY - Only the index and primary keys are projected into the index. INCLUDE - Only the specified table attributes are projected into the index. The list of projected attributes is in NonKeyAttributes. ALL - All of the table attributes are projected into the index. NonKeyAttributes - A list of one or more non-key attribute names that are projected into the secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. + /// One or more local secondary indexes (the maximum is 5) to be created on the + /// table. Each index is scoped to a given partition key value. There is a 10 + /// GB size limit per partition key value; otherwise, the size of a local secondary + /// index is unconstrained. + /// + /// + /// Each local secondary index in the array includes the following: + /// + /// + /// * IndexName - The name of the local secondary index. Must be unique only + /// for this table. + /// + /// + /// * KeySchema - Specifies the key schema for the local secondary index. + /// The key schema must begin with the same partition key as the table. 
+ /// + /// + /// * Projection - Specifies attributes that are copied (projected) from the + /// table into the index. These are in addition to the primary key attributes + /// and index key attributes, which are automatically projected. Each attribute + /// specification is composed of: ProjectionType - One of the following: KEYS_ONLY + /// - Only the index and primary keys are projected into the index. INCLUDE + /// - Only the specified table attributes are projected into the index. The + /// list of projected attributes is in NonKeyAttributes. ALL - All of the + /// table attributes are projected into the index. NonKeyAttributes - A list + /// of one or more non-key attribute names that are projected into the secondary + /// index. The total count of attributes provided in NonKeyAttributes, summed + /// across all of the secondary indexes, must not exceed 100. If you project + /// the same attribute into two different indexes, this counts as two distinct + /// attributes when determining the total. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localSecondaryIndexes")] pub local_secondary_indexes: Option>, - /// Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. - /// If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. - /// For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide. + /// Represents the provisioned throughput settings for a specified table or index. + /// The settings can be modified using the UpdateTable operation. + /// + /// + /// If you set BillingMode as PROVISIONED, you must specify this property. If + /// you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. + /// + /// + /// For current minimum and maximum provisioned throughput values, see Service, + /// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + /// in the Amazon DynamoDB Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedThroughput")] pub provisioned_throughput: Option, /// Represents the settings used to enable server-side encryption. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sseSpecification")] pub sse_specification: Option, - /// The settings for DynamoDB Streams on the table. These settings consist of: - /// * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or disabled (false). - /// * StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified item are written to the stream. NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream. + /// The settings for DynamoDB Streams on the table. These settings consist of: + /// + /// + /// * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled + /// (true) or disabled (false). 
+ /// + /// + /// * StreamViewType - When an item in the table is modified, StreamViewType + /// determines what information is written to the table's stream. Valid values + /// for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified + /// item are written to the stream. NEW_IMAGE - The entire item, as it appears + /// after it was modified, is written to the stream. OLD_IMAGE - The entire + /// item, as it appeared before it was modified, is written to the stream. + /// NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + /// are written to the stream. #[serde(default, skip_serializing_if = "Option::is_none", rename = "streamSpecification")] pub stream_specification: Option, /// The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. @@ -70,10 +175,12 @@ pub struct TableSpec { /// The name of the table to create. #[serde(rename = "tableName")] pub table_name: String, - /// A list of key-value pairs to label the table. For more information, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). + /// A list of key-value pairs to label the table. For more information, see Tagging + /// for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// Represents the settings used to enable or disable Time to Live for the specified table. + /// Represents the settings used to enable or disable Time to Live for the specified + /// table. #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeToLive")] pub time_to_live: Option, } @@ -101,18 +208,35 @@ pub struct TableGlobalSecondaryIndexes { pub index_name: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "keySchema")] pub key_schema: Option>, - /// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. + /// Represents attributes that are copied (projected) from the table into an + /// index. These are in addition to the primary key attributes and index key + /// attributes, which are automatically projected. #[serde(default, skip_serializing_if = "Option::is_none")] pub projection: Option, - /// Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. - /// For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide. + /// Represents the provisioned throughput settings for a specified table or index. + /// The settings can be modified using the UpdateTable operation. + /// + /// + /// For current minimum and maximum provisioned throughput values, see Service, + /// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + /// in the Amazon DynamoDB Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedThroughput")] pub provisioned_throughput: Option, } -/// Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. -/// A KeySchemaElement represents exactly one attribute of the primary key. 
For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. -/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map. +/// Represents a single element of a key schema. A key schema specifies the attributes +/// that make up the primary key of a table, or the key attributes of an index. +/// +/// +/// A KeySchemaElement represents exactly one attribute of the primary key. For +/// example, a simple primary key would be represented by one KeySchemaElement +/// (for the partition key). A composite primary key would require one KeySchemaElement +/// for the partition key, and another KeySchemaElement for the sort key. +/// +/// +/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). +/// The data type must be one of String, Number, or Binary. The attribute cannot +/// be nested within a List or a Map. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableGlobalSecondaryIndexesKeySchema { #[serde(default, skip_serializing_if = "Option::is_none", rename = "attributeName")] @@ -121,7 +245,9 @@ pub struct TableGlobalSecondaryIndexesKeySchema { pub key_type: Option, } -/// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. +/// Represents attributes that are copied (projected) from the table into an +/// index. These are in addition to the primary key attributes and index key +/// attributes, which are automatically projected. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableGlobalSecondaryIndexesProjection { #[serde(default, skip_serializing_if = "Option::is_none", rename = "nonKeyAttributes")] @@ -130,8 +256,13 @@ pub struct TableGlobalSecondaryIndexesProjection { pub projection_type: Option, } -/// Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. -/// For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide. +/// Represents the provisioned throughput settings for a specified table or index. +/// The settings can be modified using the UpdateTable operation. +/// +/// +/// For current minimum and maximum provisioned throughput values, see Service, +/// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +/// in the Amazon DynamoDB Developer Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableGlobalSecondaryIndexesProvisionedThroughput { #[serde(default, skip_serializing_if = "Option::is_none", rename = "readCapacityUnits")] @@ -140,9 +271,19 @@ pub struct TableGlobalSecondaryIndexesProvisionedThroughput { pub write_capacity_units: Option, } -/// Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. -/// A KeySchemaElement represents exactly one attribute of the primary key. 
For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. -/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map. +/// Represents a single element of a key schema. A key schema specifies the attributes +/// that make up the primary key of a table, or the key attributes of an index. +/// +/// +/// A KeySchemaElement represents exactly one attribute of the primary key. For +/// example, a simple primary key would be represented by one KeySchemaElement +/// (for the partition key). A composite primary key would require one KeySchemaElement +/// for the partition key, and another KeySchemaElement for the sort key. +/// +/// +/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). +/// The data type must be one of String, Number, or Binary. The attribute cannot +/// be nested within a List or a Map. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableKeySchema { #[serde(default, skip_serializing_if = "Option::is_none", rename = "attributeName")] @@ -158,14 +299,26 @@ pub struct TableLocalSecondaryIndexes { pub index_name: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "keySchema")] pub key_schema: Option>, - /// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. + /// Represents attributes that are copied (projected) from the table into an + /// index. These are in addition to the primary key attributes and index key + /// attributes, which are automatically projected. #[serde(default, skip_serializing_if = "Option::is_none")] pub projection: Option, } -/// Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. -/// A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. -/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map. +/// Represents a single element of a key schema. A key schema specifies the attributes +/// that make up the primary key of a table, or the key attributes of an index. +/// +/// +/// A KeySchemaElement represents exactly one attribute of the primary key. For +/// example, a simple primary key would be represented by one KeySchemaElement +/// (for the partition key). A composite primary key would require one KeySchemaElement +/// for the partition key, and another KeySchemaElement for the sort key. +/// +/// +/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). +/// The data type must be one of String, Number, or Binary. The attribute cannot +/// be nested within a List or a Map. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableLocalSecondaryIndexesKeySchema { #[serde(default, skip_serializing_if = "Option::is_none", rename = "attributeName")] @@ -174,7 +327,9 @@ pub struct TableLocalSecondaryIndexesKeySchema { pub key_type: Option, } -/// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. +/// Represents attributes that are copied (projected) from the table into an +/// index. These are in addition to the primary key attributes and index key +/// attributes, which are automatically projected. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableLocalSecondaryIndexesProjection { #[serde(default, skip_serializing_if = "Option::is_none", rename = "nonKeyAttributes")] @@ -183,9 +338,17 @@ pub struct TableLocalSecondaryIndexesProjection { pub projection_type: Option, } -/// Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. -/// If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. -/// For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) in the Amazon DynamoDB Developer Guide. +/// Represents the provisioned throughput settings for a specified table or index. +/// The settings can be modified using the UpdateTable operation. +/// +/// +/// If you set BillingMode as PROVISIONED, you must specify this property. If +/// you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. +/// +/// +/// For current minimum and maximum provisioned throughput values, see Service, +/// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +/// in the Amazon DynamoDB Developer Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableProvisionedThroughput { #[serde(default, skip_serializing_if = "Option::is_none", rename = "readCapacityUnits")] @@ -205,9 +368,21 @@ pub struct TableSseSpecification { pub sse_type: Option, } -/// The settings for DynamoDB Streams on the table. These settings consist of: -/// * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or disabled (false). -/// * StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified item are written to the stream. NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream. OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream. NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream. +/// The settings for DynamoDB Streams on the table. These settings consist of: +/// +/// +/// * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled +/// (true) or disabled (false). +/// +/// +/// * StreamViewType - When an item in the table is modified, StreamViewType +/// determines what information is written to the table's stream. 
Valid values +/// for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified +/// item are written to the stream. NEW_IMAGE - The entire item, as it appears +/// after it was modified, is written to the stream. OLD_IMAGE - The entire +/// item, as it appeared before it was modified, is written to the stream. +/// NEW_AND_OLD_IMAGES - Both the new and the old item images of the item +/// are written to the stream. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStreamSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "streamEnabled")] @@ -216,9 +391,19 @@ pub struct TableStreamSpecification { pub stream_view_type: Option, } -/// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table. -/// Amazon Web Services-assigned tag names and values are automatically assigned the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user: in the Cost Allocation Report. You cannot backdate the application of a tag. -/// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) in the Amazon DynamoDB Developer Guide. +/// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to +/// a single DynamoDB table. +/// +/// +/// Amazon Web Services-assigned tag names and values are automatically assigned +/// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned +/// tag names do not count towards the tag limit of 50. User-assigned tag names +/// have the prefix user: in the Cost Allocation Report. You cannot backdate +/// the application of a tag. +/// +/// +/// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +/// in the Amazon DynamoDB Developer Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -227,7 +412,8 @@ pub struct TableTags { pub value: Option, } -/// Represents the settings used to enable or disable Time to Live for the specified table. +/// Represents the settings used to enable or disable Time to Live for the specified +/// table. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableTimeToLive { #[serde(default, skip_serializing_if = "Option::is_none", rename = "attributeName")] @@ -239,34 +425,54 @@ pub struct TableTimeToLive { /// TableStatus defines the observed state of Table #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// Contains information about the table archive. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "archivalSummary")] pub archival_summary: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) format. + /// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + /// format. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationDateTime")] pub creation_date_time: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalSecondaryIndexesDescriptions")] pub global_secondary_indexes_descriptions: Option>, - /// Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) in use, if the table is replicated across Amazon Web Services Regions. + /// Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) + /// in use, if the table is replicated across Amazon Web Services Regions. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalTableVersion")] pub global_table_version: Option, - /// The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value. + /// The number of items in the specified table. DynamoDB updates this value approximately + /// every six hours. Recent changes might not be reflected in this value. #[serde(default, skip_serializing_if = "Option::is_none", rename = "itemCount")] pub item_count: Option, - /// The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table. + /// The Amazon Resource Name (ARN) that uniquely identifies the latest stream + /// for this table. #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestStreamARN")] pub latest_stream_arn: Option, - /// A timestamp, in ISO 8601 format, for this stream. - /// Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: - /// * Amazon Web Services customer ID - /// * Table name - /// * StreamLabel + /// A timestamp, in ISO 8601 format, for this stream. + /// + /// + /// Note that LatestStreamLabel is not a unique identifier for the stream, because + /// it is possible that a stream from another table might have the same timestamp. + /// However, the combination of the following three elements is guaranteed to + /// be unique: + /// + /// + /// * Amazon Web Services customer ID + /// + /// + /// * Table name + /// + /// + /// * StreamLabel #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestStreamLabel")] pub latest_stream_label: Option, /// Represents replicas of the table. 
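As a rough illustration of the ProvisionedThroughput and StreamSpecification settings documented in the hunks above, a self-contained sketch using simplified local mirrors of the generated structs; the field types (Option<i64>, Option<bool>, Option<String>) and all values are assumptions for illustration only.

// Simplified mirrors of the generated structs; illustrative only.
#[derive(Clone, Debug, Default, PartialEq)]
struct TableProvisionedThroughput {
    read_capacity_units: Option<i64>,
    write_capacity_units: Option<i64>,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct TableStreamSpecification {
    stream_enabled: Option<bool>,
    stream_view_type: Option<String>,
}

fn main() {
    // Only meaningful when BillingMode is PROVISIONED; with PAY_PER_REQUEST
    // this property must be omitted, per the documentation above.
    let throughput = TableProvisionedThroughput {
        read_capacity_units: Some(5),
        write_capacity_units: Some(5),
    };

    // Write both the new and the old item image to the stream on every change.
    let stream = TableStreamSpecification {
        stream_enabled: Some(true),
        stream_view_type: Some("NEW_AND_OLD_IMAGES".to_owned()),
    };

    println!("{:?} {:?}", throughput, stream);
}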
@@ -278,28 +484,60 @@ pub struct TableStatus { /// Unique identifier for the table for which the backup was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tableID")] pub table_id: Option, - /// The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value. + /// The total size of the specified table, in bytes. DynamoDB updates this value + /// approximately every six hours. Recent changes might not be reflected in this + /// value. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tableSizeBytes")] pub table_size_bytes: Option, - /// The current state of the table: - /// * CREATING - The table is being created. - /// * UPDATING - The table/index configuration is being updated. The table/index remains available for data operations when UPDATING. - /// * DELETING - The table is being deleted. - /// * ACTIVE - The table is ready for use. - /// * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table in inaccessible. Table operations may fail due to failure to use the KMS key. DynamoDB will initiate the table archival process when a table's KMS key remains inaccessible for more than seven days. - /// * ARCHIVING - The table is being archived. Operations are not allowed until archival is complete. - /// * ARCHIVED - The table has been archived. See the ArchivalReason for more information. + /// The current state of the table: + /// + /// + /// * CREATING - The table is being created. + /// + /// + /// * UPDATING - The table/index configuration is being updated. The table/index + /// remains available for data operations when UPDATING. + /// + /// + /// * DELETING - The table is being deleted. + /// + /// + /// * ACTIVE - The table is ready for use. + /// + /// + /// * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the + /// table in inaccessible. Table operations may fail due to failure to use + /// the KMS key. DynamoDB will initiate the table archival process when a + /// table's KMS key remains inaccessible for more than seven days. + /// + /// + /// * ARCHIVING - The table is being archived. Operations are not allowed + /// until archival is complete. + /// + /// + /// * ARCHIVED - The table has been archived. See the ArchivalReason for more + /// information. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tableStatus")] pub table_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. @@ -317,7 +555,9 @@ pub struct TableStatusArchivalSummary { pub archival_reason: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusConditions { /// Last time the condition transitioned from one status to another. @@ -353,17 +593,30 @@ pub struct TableStatusGlobalSecondaryIndexesDescriptions { pub item_count: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "keySchema")] pub key_schema: Option>, - /// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. + /// Represents attributes that are copied (projected) from the table into an + /// index. These are in addition to the primary key attributes and index key + /// attributes, which are automatically projected. #[serde(default, skip_serializing_if = "Option::is_none")] pub projection: Option, - /// Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases. + /// Represents the provisioned throughput settings for the table, consisting + /// of read and write capacity units, along with data about increases and decreases. #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedThroughput")] pub provisioned_throughput: Option, } -/// Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index. -/// A KeySchemaElement represents exactly one attribute of the primary key. For example, a simple primary key would be represented by one KeySchemaElement (for the partition key). A composite primary key would require one KeySchemaElement for the partition key, and another KeySchemaElement for the sort key. -/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. 
The attribute cannot be nested within a List or a Map. +/// Represents a single element of a key schema. A key schema specifies the attributes +/// that make up the primary key of a table, or the key attributes of an index. +/// +/// +/// A KeySchemaElement represents exactly one attribute of the primary key. For +/// example, a simple primary key would be represented by one KeySchemaElement +/// (for the partition key). A composite primary key would require one KeySchemaElement +/// for the partition key, and another KeySchemaElement for the sort key. +/// +/// +/// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). +/// The data type must be one of String, Number, or Binary. The attribute cannot +/// be nested within a List or a Map. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusGlobalSecondaryIndexesDescriptionsKeySchema { #[serde(default, skip_serializing_if = "Option::is_none", rename = "attributeName")] @@ -372,7 +625,9 @@ pub struct TableStatusGlobalSecondaryIndexesDescriptionsKeySchema { pub key_type: Option, } -/// Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected. +/// Represents attributes that are copied (projected) from the table into an +/// index. These are in addition to the primary key attributes and index key +/// attributes, which are automatically projected. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusGlobalSecondaryIndexesDescriptionsProjection { #[serde(default, skip_serializing_if = "Option::is_none", rename = "nonKeyAttributes")] @@ -381,7 +636,8 @@ pub struct TableStatusGlobalSecondaryIndexesDescriptionsProjection { pub projection_type: Option, } -/// Represents the provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases. +/// Represents the provisioned throughput settings for the table, consisting +/// of read and write capacity units, along with data about increases and decreases. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusGlobalSecondaryIndexesDescriptionsProvisionedThroughput { #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastDecreaseDateTime")] @@ -403,7 +659,8 @@ pub struct TableStatusReplicas { pub global_secondary_indexes: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsMasterKeyID")] pub kms_master_key_id: Option, - /// Replica-specific provisioned throughput settings. If not specified, uses the source table's provisioned throughput settings. + /// Replica-specific provisioned throughput settings. If not specified, uses + /// the source table's provisioned throughput settings. #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedThroughputOverride")] pub provisioned_throughput_override: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "regionName")] @@ -426,19 +683,22 @@ pub struct TableStatusReplicas { pub struct TableStatusReplicasGlobalSecondaryIndexes { #[serde(default, skip_serializing_if = "Option::is_none", rename = "indexName")] pub index_name: Option, - /// Replica-specific provisioned throughput settings. If not specified, uses the source table's provisioned throughput settings. + /// Replica-specific provisioned throughput settings. 
If not specified, uses + /// the source table's provisioned throughput settings. #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedThroughputOverride")] pub provisioned_throughput_override: Option, } -/// Replica-specific provisioned throughput settings. If not specified, uses the source table's provisioned throughput settings. +/// Replica-specific provisioned throughput settings. If not specified, uses +/// the source table's provisioned throughput settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusReplicasGlobalSecondaryIndexesProvisionedThroughputOverride { #[serde(default, skip_serializing_if = "Option::is_none", rename = "readCapacityUnits")] pub read_capacity_units: Option, } -/// Replica-specific provisioned throughput settings. If not specified, uses the source table's provisioned throughput settings. +/// Replica-specific provisioned throughput settings. If not specified, uses +/// the source table's provisioned throughput settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TableStatusReplicasProvisionedThroughputOverride { #[serde(default, skip_serializing_if = "Option::is_none", rename = "readCapacityUnits")] diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/dhcpoptions.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/dhcpoptions.rs index 6e4d323bc..344c20a5b 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/dhcpoptions.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/dhcpoptions.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DhcpOptionsSpec defines the desired state of DhcpOptions. -/// Describes a set of DHCP options. +/// DhcpOptionsSpec defines the desired state of DhcpOptions. +/// +/// +/// Describes a set of DHCP options. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "DHCPOptions", plural = "dhcpoptions")] #[kube(namespaced)] @@ -16,7 +18,9 @@ pub struct DHCPOptionsSpec { /// A DHCP configuration option. #[serde(rename = "dhcpConfigurations")] pub dhcp_configurations: Vec, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. 
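A small, self-contained sketch of the tag shape described above, using a simplified local mirror of the generated DHCPOptionsTags struct; field types are assumed to be Option<String> and the keys/values are hypothetical.

// Simplified mirror of the generated DHCPOptionsTags struct.
#[derive(Clone, Debug, Default, PartialEq)]
struct DHCPOptionsTags {
    key: Option<String>,
    value: Option<String>,
}

fn main() {
    let tags = vec![
        // A normal key/value tag.
        DHCPOptionsTags {
            key: Some("environment".to_owned()),
            value: Some("staging".to_owned()),
        },
        // Per the documentation above, omitting the value means EC2 stores
        // an empty string for it.
        DHCPOptionsTags {
            key: Some("owner".to_owned()),
            value: None,
        },
    ];
    println!("{:?}", tags);
}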
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -42,16 +46,24 @@ pub struct DHCPOptionsTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DHCPOptionsVpcRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DHCPOptionsVpcRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -61,10 +73,15 @@ pub struct DHCPOptionsVpcRefsFrom { /// DHCPOptionsStatus defines the observed state of DHCPOptions #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DHCPOptionsStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The ID of the set of DHCP options. @@ -75,20 +92,32 @@ pub struct DHCPOptionsStatus { pub owner_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DHCPOptionsStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DHCPOptionsStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/elasticipaddresses.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/elasticipaddresses.rs index af4cec9af..54d7544eb 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/elasticipaddresses.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/elasticipaddresses.rs @@ -12,21 +12,37 @@ use serde::{Serialize, Deserialize}; #[kube(status = "ElasticIPAddressStatus")] #[kube(schema = "disabled")] pub struct ElasticIPAddressSpec { - /// [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address pool. + /// [EC2-VPC] The Elastic IP address to recover or an IPv4 address from an address + /// pool. #[serde(default, skip_serializing_if = "Option::is_none")] pub address: Option, - /// The ID of a customer-owned address pool. Use this parameter to let Amazon EC2 select an address from the address pool. Alternatively, specify a specific address from the address pool. + /// The ID of a customer-owned address pool. Use this parameter to let Amazon + /// EC2 select an address from the address pool. Alternatively, specify a specific + /// address from the address pool. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customerOwnedIPv4Pool")] pub customer_owned_i_pv4_pool: Option, - /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. 
Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. - /// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) to view the network border groups. - /// You cannot use a network border group with EC2 Classic. If you attempt this operation on EC2 Classic, you receive an InvalidParameterCombination error. + /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from + /// which Amazon Web Services advertises IP addresses. Use this parameter to + /// limit the IP address to this location. IP addresses cannot move between network + /// border groups. + /// + /// + /// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) + /// to view the network border groups. + /// + /// + /// You cannot use a network border group with EC2 Classic. If you attempt this + /// operation on EC2 Classic, you receive an InvalidParameterCombination error. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkBorderGroup")] pub network_border_group: Option, - /// The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead. + /// The ID of an address pool that you own. Use this parameter to let Amazon + /// EC2 select an address from the address pool. To specify a specific address + /// from the address pool, use the Address parameter instead. #[serde(default, skip_serializing_if = "Option::is_none", rename = "publicIPv4Pool")] pub public_i_pv4_pool: Option, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -43,16 +59,23 @@ pub struct ElasticIPAddressTags { /// ElasticIPAddressStatus defines the observed state of ElasticIPAddress #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ElasticIPAddressStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation of the Elastic IP address for use with instances in a VPC. + /// [EC2-VPC] The ID that Amazon Web Services assigns to represent the allocation + /// of the Elastic IP address for use with instances in a VPC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocationID")] pub allocation_id: Option, - /// The carrier IP address. This option is only available for network interfaces which reside in a subnet in a Wavelength Zone (for example an EC2 instance). + /// The carrier IP address. 
This option is only available for network interfaces + /// which reside in a subnet in a Wavelength Zone (for example an EC2 instance). #[serde(default, skip_serializing_if = "Option::is_none", rename = "carrierIP")] pub carrier_ip: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The customer-owned IP address. @@ -63,20 +86,32 @@ pub struct ElasticIPAddressStatus { pub public_ip: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ElasticIPAddressStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ElasticIPAddressStatusConditions { /// Last time the condition transitioned from one status to another. 
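To make the AWSResourceReferenceWrapper indirection from the dhcpoptions.rs hunk above concrete, a self-contained sketch: the inner `name` field is an assumption based on the `from: name: my-api` example in the doc comment (the generated field is not shown in this hunk), and the VPC name is hypothetical.

// Simplified mirrors of DHCPOptionsVpcRefs / DHCPOptionsVpcRefsFrom.
// The `name` field is assumed from the `from: name: my-api` example above.
#[derive(Clone, Debug, Default, PartialEq)]
struct DHCPOptionsVpcRefsFrom {
    name: Option<String>,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct DHCPOptionsVpcRefs {
    from: Option<DHCPOptionsVpcRefsFrom>,
}

fn main() {
    // Reference another in-cluster resource by name instead of hard-coding
    // an AWS identifier (Id/ARN/Name).
    let vpc_ref = DHCPOptionsVpcRefs {
        from: Some(DHCPOptionsVpcRefsFrom {
            name: Some("my-vpc".to_owned()),
        }),
    };
    println!("{:?}", vpc_ref);
}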
diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/instances.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/instances.rs index d8cc94353..7dcfab14f 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/instances.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/instances.rs @@ -5,89 +5,170 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// InstanceSpec defines the desired state of Instance. -/// Describes an instance. +/// InstanceSpec defines the desired state of Instance. +/// +/// +/// Describes an instance. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "Instance", plural = "instances")] #[kube(namespaced)] #[kube(status = "InstanceStatus")] #[kube(schema = "disabled")] pub struct InstanceSpec { - /// The block device mapping, which defines the EBS volumes and instance store volumes to attach to the instance at launch. For more information, see Block device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) in the Amazon EC2 User Guide. + /// The block device mapping, which defines the EBS volumes and instance store + /// volumes to attach to the instance at launch. For more information, see Block + /// device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockDeviceMappings")] pub block_device_mappings: Option>, - /// Information about the Capacity Reservation targeting option. If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). + /// Information about the Capacity Reservation targeting option. If you do not + /// specify this parameter, the instance's Capacity Reservation preference defaults + /// to open, which enables it to run in any open Capacity Reservation that has + /// matching attributes (instance type, platform, Availability Zone). #[serde(default, skip_serializing_if = "Option::is_none", rename = "capacityReservationSpecification")] pub capacity_reservation_specification: Option, - /// The CPU options for the instance. For more information, see Optimize CPU options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) in the Amazon EC2 User Guide. + /// The CPU options for the instance. For more information, see Optimize CPU + /// options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "cpuOptions")] pub cpu_options: Option, - /// The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) in the Amazon EC2 User Guide. - /// Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) - /// For T3 instances with host tenancy, only standard is supported. 
+ /// The credit option for CPU usage of the burstable performance instance. Valid + /// values are standard and unlimited. To change this attribute after launch, + /// use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). + /// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) + /// in the Amazon EC2 User Guide. + /// + /// + /// Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) + /// + /// + /// For T3 instances with host tenancy, only standard is supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creditSpecification")] pub credit_specification: Option, - /// Indicates whether an instance is enabled for stop protection. For more information, see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection). + /// Indicates whether an instance is enabled for stop protection. For more information, + /// see Stop protection (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection). #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableAPIStop")] pub disable_api_stop: Option, - /// If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance. - /// Default: false + /// If you set this parameter to true, you can't terminate the instance using + /// the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute + /// after launch, use ModifyInstanceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html). + /// Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, + /// you can terminate the instance by running the shutdown command from the instance. + /// + /// + /// Default: false #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableAPITermination")] pub disable_api_termination: Option, - /// Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance. - /// Default: false + /// Indicates whether the instance is optimized for Amazon EBS I/O. This optimization + /// provides dedicated throughput to Amazon EBS and an optimized configuration + /// stack to provide optimal Amazon EBS I/O performance. This optimization isn't + /// available with all instance types. Additional usage charges apply when using + /// an EBS-optimized instance. + /// + /// + /// Default: false #[serde(default, skip_serializing_if = "Option::is_none", rename = "ebsOptimized")] pub ebs_optimized: Option, - /// An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource that you can attach to your Windows instance to accelerate the graphics performance of your applications. 
For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) in the Amazon EC2 User Guide. + /// An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource + /// that you can attach to your Windows instance to accelerate the graphics performance + /// of your applications. For more information, see Amazon EC2 Elastic GPUs (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-graphics.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "elasticGPUSpecification")] pub elastic_gpu_specification: Option>, - /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. - /// You cannot specify accelerators from different generations in the same request. + /// An elastic inference accelerator to associate with the instance. Elastic + /// inference accelerators are a resource you can attach to your Amazon EC2 instances + /// to accelerate your Deep Learning (DL) inference workloads. + /// + /// + /// You cannot specify accelerators from different generations in the same request. #[serde(default, skip_serializing_if = "Option::is_none", rename = "elasticInferenceAccelerators")] pub elastic_inference_accelerators: Option>, - /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the Amazon Web Services Nitro Enclaves User Guide. - /// You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. + /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. + /// For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + /// in the Amazon Web Services Nitro Enclaves User Guide. + /// + /// + /// You can't enable Amazon Web Services Nitro Enclaves and hibernation on the + /// same instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enclaveOptions")] pub enclave_options: Option, - /// Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the Amazon EC2 User Guide. - /// You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. + /// Indicates whether an instance is enabled for hibernation. For more information, + /// see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + /// in the Amazon EC2 User Guide. + /// + /// + /// You can't enable hibernation and Amazon Web Services Nitro Enclaves on the + /// same instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hibernationOptions")] pub hibernation_options: Option, /// The name or Amazon Resource Name (ARN) of an IAM instance profile. #[serde(default, skip_serializing_if = "Option::is_none", rename = "iamInstanceProfile")] pub iam_instance_profile: Option, - /// The ID of the AMI. An AMI ID is required to launch an instance and must be specified here or in a launch template. + /// The ID of the AMI. 
An AMI ID is required to launch an instance and must be + /// specified here or in a launch template. #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageID")] pub image_id: Option, - /// Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). - /// Default: stop + /// Indicates whether an instance stops or terminates when you initiate shutdown + /// from the instance (using the operating system command for system shutdown). + /// + /// + /// Default: stop #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceInitiatedShutdownBehavior")] pub instance_initiated_shutdown_behavior: Option, - /// The market (purchasing) option for the instances. - /// For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop. + /// The market (purchasing) option for the instances. + /// + /// + /// For RunInstances, persistent Spot Instance requests are only supported when + /// InstanceInterruptionBehavior is set to either hibernate or stop. #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceMarketOptions")] pub instance_market_options: Option, - /// The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the Amazon EC2 User Guide. - /// Default: m1.small + /// The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + /// in the Amazon EC2 User Guide. + /// + /// + /// Default: m1.small #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] pub instance_type: Option, - /// [EC2-VPC] The number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch. - /// You cannot specify this option and the network interfaces option in the same request. + /// [EC2-VPC] The number of IPv6 addresses to associate with the primary network + /// interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. + /// You cannot specify this option and the option to assign specific IPv6 addresses + /// in the same request. You can specify this option if you've specified a minimum + /// number of instances to launch. + /// + /// + /// You cannot specify this option and the network interfaces option in the same + /// request. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6AddressCount")] pub ipv6_address_count: Option, - /// [EC2-VPC] The IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch. - /// You cannot specify this option and the network interfaces option in the same request. + /// [EC2-VPC] The IPv6 addresses from the range of the subnet to associate with + /// the primary network interface. You cannot specify this option and the option + /// to assign a number of IPv6 addresses in the same request. 
You cannot specify + /// this option if you've specified a minimum number of instances to launch. + /// + /// + /// You cannot specify this option and the network interfaces option in the same + /// request. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6Addresses")] pub ipv6_addresses: Option>, - /// The ID of the kernel. - /// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the Amazon EC2 User Guide. + /// The ID of the kernel. + /// + /// + /// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + /// information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelID")] pub kernel_id: Option, - /// The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). - /// If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in. + /// The name of the key pair. You can create a key pair using CreateKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) + /// or ImportKeyPair (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html). + /// + /// + /// If you do not specify a key pair, you can't connect to the instance unless + /// you choose an AMI that is configured to allow users another way to log in. #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyName")] pub key_name: Option, - /// The launch template to use to launch the instances. Any parameters that you specify in RunInstances override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both. + /// The launch template to use to launch the instances. Any parameters that you + /// specify in RunInstances override the same parameters in the launch template. + /// You can specify either the name or ID of a launch template, but not both. #[serde(default, skip_serializing_if = "Option::is_none", rename = "launchTemplate")] pub launch_template: Option, /// The license configurations. @@ -96,60 +177,115 @@ pub struct InstanceSpec { /// The maintenance and recovery options for the instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maintenanceOptions")] pub maintenance_options: Option, - /// The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount. - /// Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) in the Amazon EC2 FAQ. + /// The maximum number of instances to launch. If you specify more instances + /// than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches + /// the largest possible number of instances above MinCount. 
+ /// + /// + /// Constraints: Between 1 and the maximum number you're allowed for the specified + /// instance type. For more information about the default limits, and how to + /// request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + /// in the Amazon EC2 FAQ. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxCount")] pub max_count: Option, - /// The metadata options for the instance. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + /// The metadata options for the instance. For more information, see Instance + /// metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "metadataOptions")] pub metadata_options: Option, - /// The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances. - /// Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) in the Amazon EC2 General FAQ. + /// The minimum number of instances to launch. If you specify a minimum that + /// is more instances than Amazon EC2 can launch in the target Availability Zone, + /// Amazon EC2 launches no instances. + /// + /// + /// Constraints: Between 1 and the maximum number you're allowed for the specified + /// instance type. For more information about the default limits, and how to + /// request an increase, see How many instances can I run in Amazon EC2 (http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2) + /// in the Amazon EC2 General FAQ. #[serde(default, skip_serializing_if = "Option::is_none", rename = "minCount")] pub min_count: Option, /// Specifies whether detailed monitoring is enabled for the instance. #[serde(default, skip_serializing_if = "Option::is_none")] pub monitoring: Option, - /// The network interfaces to associate with the instance. If you specify a network interface, you must specify any security groups and subnets as part of the network interface. + /// The network interfaces to associate with the instance. If you specify a network + /// interface, you must specify any security groups and subnets as part of the + /// network interface. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkInterfaces")] pub network_interfaces: Option>, /// The placement for the instance. #[serde(default, skip_serializing_if = "Option::is_none")] pub placement: Option, - /// The options for the instance hostname. The default values are inherited from the subnet. + /// The options for the instance hostname. The default values are inherited from + /// the subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateDNSNameOptions")] pub private_dns_name_options: Option, - /// [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet. - /// Only one private IP address can be designated as primary. 
You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request. - /// You cannot specify this option and the network interfaces option in the same request. + /// [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 + /// address range of the subnet. + /// + /// + /// Only one private IP address can be designated as primary. You can't specify + /// this option if you've specified the option to designate a private IP address + /// as the primary IP address in a network interface specification. You cannot + /// specify this option if you're launching more than one instance in the request. + /// + /// + /// You cannot specify this option and the network interfaces option in the same + /// request. #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateIPAddress")] pub private_ip_address: Option, - /// The ID of the RAM disk to select. Some kernels require additional drivers at launch. Check the kernel requirements for information about whether you need to specify a RAM disk. To find kernel requirements, go to the Amazon Web Services Resource Center and search for the kernel ID. - /// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the Amazon EC2 User Guide. + /// The ID of the RAM disk to select. Some kernels require additional drivers + /// at launch. Check the kernel requirements for information about whether you + /// need to specify a RAM disk. To find kernel requirements, go to the Amazon + /// Web Services Resource Center and search for the kernel ID. + /// + /// + /// We recommend that you use PV-GRUB instead of kernels and RAM disks. For more + /// information, see PV-GRUB (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ramDiskID")] pub ram_disk_id: Option, - /// The IDs of the security groups. You can create a security group using CreateSecurityGroup (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). - /// If you specify a network interface, you must specify any security groups as part of the network interface. + /// The IDs of the security groups. You can create a security group using CreateSecurityGroup + /// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html). + /// + /// + /// If you specify a network interface, you must specify any security groups + /// as part of the network interface. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] pub security_group_i_ds: Option>, - /// [EC2-Classic, default VPC] The names of the security groups. For a nondefault VPC, you must use security group IDs instead. - /// If you specify a network interface, you must specify any security groups as part of the network interface. - /// Default: Amazon EC2 uses the default security group. + /// [EC2-Classic, default VPC] The names of the security groups. For a nondefault + /// VPC, you must use security group IDs instead. + /// + /// + /// If you specify a network interface, you must specify any security groups + /// as part of the network interface. 
+ /// + /// + /// Default: Amazon EC2 uses the default security group. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroups")] pub security_groups: Option>, - /// [EC2-VPC] The ID of the subnet to launch the instance into. - /// If you specify a network interface, you must specify any subnets as part of the network interface. + /// [EC2-VPC] The ID of the subnet to launch the instance into. + /// + /// + /// If you specify a network interface, you must specify any subnets as part + /// of the network interface. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetID")] pub subnet_id: Option, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The user data script to make available to the instance. For more information, see Run commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Run commands on your Windows instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB. + /// The user data script to make available to the instance. For more information, + /// see Run commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + /// and Run commands on your Windows instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html). + /// If you are using a command line tool, base64-encoding is performed for you, + /// and you can load the text from a file. Otherwise, you must provide base64-encoded + /// text. User data is limited to 16 KB. #[serde(default, skip_serializing_if = "Option::is_none", rename = "userData")] pub user_data: Option, } -/// Describes a block device mapping, which defines the EBS volumes and instance store volumes to attach to an instance at launch. +/// Describes a block device mapping, which defines the EBS volumes and instance +/// store volumes to attach to an instance at launch. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceBlockDeviceMappings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] @@ -186,7 +322,10 @@ pub struct InstanceBlockDeviceMappingsEbs { pub volume_type: Option, } -/// Information about the Capacity Reservation targeting option. If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). +/// Information about the Capacity Reservation targeting option. If you do not +/// specify this parameter, the instance's Capacity Reservation preference defaults +/// to open, which enables it to run in any open Capacity Reservation that has +/// matching attributes (instance type, platform, Availability Zone). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceCapacityReservationSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "capacityReservationPreference")] @@ -205,7 +344,9 @@ pub struct InstanceCapacityReservationSpecificationCapacityReservationTarget { pub capacity_reservation_resource_group_arn: Option, } -/// The CPU options for the instance. For more information, see Optimize CPU options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) in the Amazon EC2 User Guide. +/// The CPU options for the instance. For more information, see Optimize CPU +/// options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) +/// in the Amazon EC2 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceCpuOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "coreCount")] @@ -214,9 +355,17 @@ pub struct InstanceCpuOptions { pub threads_per_core: Option, } -/// The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) in the Amazon EC2 User Guide. -/// Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) -/// For T3 instances with host tenancy, only standard is supported. +/// The credit option for CPU usage of the burstable performance instance. Valid +/// values are standard and unlimited. To change this attribute after launch, +/// use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). +/// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +/// in the Amazon EC2 User Guide. +/// +/// +/// Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) +/// +/// +/// For T3 instances with host tenancy, only standard is supported. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceCreditSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "cpuCredits")] @@ -239,16 +388,26 @@ pub struct InstanceElasticInferenceAccelerators { pub r#type: Option, } -/// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the Amazon Web Services Nitro Enclaves User Guide. -/// You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. +/// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. +/// For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +/// in the Amazon Web Services Nitro Enclaves User Guide. +/// +/// +/// You can't enable Amazon Web Services Nitro Enclaves and hibernation on the +/// same instance. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceEnclaveOptions { #[serde(default, skip_serializing_if = "Option::is_none")] pub enabled: Option, } -/// Indicates whether an instance is enabled for hibernation. For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the Amazon EC2 User Guide. -/// You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. +/// Indicates whether an instance is enabled for hibernation. For more information, +/// see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +/// in the Amazon EC2 User Guide. +/// +/// +/// You can't enable hibernation and Amazon Web Services Nitro Enclaves on the +/// same instance. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceHibernationOptions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -264,8 +423,11 @@ pub struct InstanceIamInstanceProfile { pub name: Option, } -/// The market (purchasing) option for the instances. -/// For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop. +/// The market (purchasing) option for the instances. +/// +/// +/// For RunInstances, persistent Spot Instance requests are only supported when +/// InstanceInterruptionBehavior is set to either hibernate or stop. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceInstanceMarketOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "marketType")] @@ -297,7 +459,9 @@ pub struct InstanceIpv6Addresses { pub ipv6_address: Option, } -/// The launch template to use to launch the instances. Any parameters that you specify in RunInstances override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both. +/// The launch template to use to launch the instances. Any parameters that you +/// specify in RunInstances override the same parameters in the launch template. +/// You can specify either the name or ID of a launch template, but not both. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceLaunchTemplate { #[serde(default, skip_serializing_if = "Option::is_none", rename = "launchTemplateID")] @@ -322,7 +486,8 @@ pub struct InstanceMaintenanceOptions { pub auto_recovery: Option, } -/// The metadata options for the instance. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +/// The metadata options for the instance. For more information, see Instance +/// metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceMetadataOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpEndpoint")] @@ -436,7 +601,8 @@ pub struct InstancePlacement { pub tenancy: Option, } -/// The options for the instance hostname. The default values are inherited from the subnet. +/// The options for the instance hostname. The default values are inherited from +/// the subnet. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstancePrivateDnsNameOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableResourceNameDNSAAAARecord")] @@ -459,22 +625,29 @@ pub struct InstanceTags { /// InstanceStatus defines the observed state of Instance #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// The AMI launch index, which can be used to find this instance in the launch group. + /// The AMI launch index, which can be used to find this instance in the launch + /// group. #[serde(default, skip_serializing_if = "Option::is_none", rename = "amiLaunchIndex")] pub ami_launch_index: Option, /// The architecture of the image. #[serde(default, skip_serializing_if = "Option::is_none")] pub architecture: Option, - /// The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) in the Amazon EC2 User Guide. + /// The boot mode of the instance. For more information, see Boot modes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "bootMode")] pub boot_mode: Option, /// The ID of the Capacity Reservation. #[serde(default, skip_serializing_if = "Option::is_none", rename = "capacityReservationID")] pub capacity_reservation_id: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The Elastic GPU associated with the instance. @@ -486,7 +659,8 @@ pub struct InstanceStatus { /// Specifies whether enhanced networking with ENA is enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enaSupport")] pub ena_support: Option, - /// The hypervisor type of the instance. The value xen is used for both Xen and Nitro hypervisors. + /// The hypervisor type of the instance. The value xen is used for both Xen and + /// Nitro hypervisors. #[serde(default, skip_serializing_if = "Option::is_none")] pub hypervisor: Option, /// The ID of the instance. @@ -510,27 +684,43 @@ pub struct InstanceStatus { /// The value is Windows for Windows instances; otherwise blank. #[serde(default, skip_serializing_if = "Option::is_none")] pub platform: Option, - /// The platform details value for the instance. For more information, see AMI billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) in the Amazon EC2 User Guide. + /// The platform details value for the instance. 
For more information, see AMI + /// billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "platformDetails")] pub platform_details: Option, - /// (IPv4 only) The private DNS hostname name assigned to the instance. This DNS hostname can only be used inside the Amazon EC2 network. This name is not available until the instance enters the running state. - /// [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private DNS hostnames if you've enabled DNS resolution and DNS hostnames in your VPC. If you are not using the Amazon-provided DNS server in your VPC, your custom domain name servers must resolve the hostname as appropriate. + /// (IPv4 only) The private DNS hostname name assigned to the instance. This + /// DNS hostname can only be used inside the Amazon EC2 network. This name is + /// not available until the instance enters the running state. + /// + /// + /// [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private + /// DNS hostnames if you've enabled DNS resolution and DNS hostnames in your + /// VPC. If you are not using the Amazon-provided DNS server in your VPC, your + /// custom domain name servers must resolve the hostname as appropriate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateDNSName")] pub private_dns_name: Option, /// The product codes attached to this instance, if applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "productCodes")] pub product_codes: Option>, - /// (IPv4 only) The public DNS name assigned to the instance. This name is not available until the instance enters the running state. For EC2-VPC, this name is only available if you've enabled DNS hostnames for your VPC. + /// (IPv4 only) The public DNS name assigned to the instance. This name is not + /// available until the instance enters the running state. For EC2-VPC, this + /// name is only available if you've enabled DNS hostnames for your VPC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "publicDNSName")] pub public_dns_name: Option, - /// The public IPv4 address, or the Carrier IP address assigned to the instance, if applicable. - /// A Carrier IP address only applies to an instance launched in a subnet associated with a Wavelength Zone. + /// The public IPv4 address, or the Carrier IP address assigned to the instance, + /// if applicable. + /// + /// + /// A Carrier IP address only applies to an instance launched in a subnet associated + /// with a Wavelength Zone. #[serde(default, skip_serializing_if = "Option::is_none", rename = "publicIPAddress")] pub public_ip_address: Option, /// The device name of the root device volume (for example, /dev/sda1). #[serde(default, skip_serializing_if = "Option::is_none", rename = "rootDeviceName")] pub root_device_name: Option, - /// The root device type used by the AMI. The AMI can use an EBS volume or an instance store volume. + /// The root device type used by the AMI. The AMI can use an EBS volume or an + /// instance store volume. #[serde(default, skip_serializing_if = "Option::is_none", rename = "rootDeviceType")] pub root_device_type: Option, /// Indicates whether source/destination checking is enabled. @@ -539,7 +729,8 @@ pub struct InstanceStatus { /// If the request is a Spot Instance request, the ID of the request. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "spotInstanceRequestID")] pub spot_instance_request_id: Option, - /// Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled. + /// Specifies whether enhanced networking with the Intel 82599 Virtual Function + /// interface is enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sriovNetSupport")] pub sriov_net_support: Option, /// The current state of the instance. @@ -551,10 +742,14 @@ pub struct InstanceStatus { /// The reason for the most recent state transition. This might be an empty string. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stateTransitionReason")] pub state_transition_reason: Option, - /// If the instance is configured for NitroTPM support, the value is v2.0. For more information, see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) in the Amazon EC2 User Guide. + /// If the instance is configured for NitroTPM support, the value is v2.0. For + /// more information, see NitroTPM (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tpmSupport")] pub tpm_support: Option, - /// The usage operation value for the instance. For more information, see AMI billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) in the Amazon EC2 User Guide. + /// The usage operation value for the instance. For more information, see AMI + /// billing information fields (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/billing-info-fields.html) + /// in the Amazon EC2 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "usageOperation")] pub usage_operation: Option, /// The time that the usage operation was last updated. @@ -568,20 +763,32 @@ pub struct InstanceStatus { pub vpc_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InstanceStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/internetgateways.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/internetgateways.rs index 40dea8b8b..2959b0a23 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/internetgateways.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/internetgateways.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// InternetGatewaySpec defines the desired state of InternetGateway. -/// Describes an internet gateway. +/// InternetGatewaySpec defines the desired state of InternetGateway. +/// +/// +/// Describes an internet gateway. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "InternetGateway", plural = "internetgateways")] #[kube(namespaced)] @@ -17,28 +19,44 @@ pub struct InternetGatewaySpec { pub route_table_refs: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeTables")] pub route_tables: Option>, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// The ID of the VPC. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub vpc: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcRef")] pub vpc_ref: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayRouteTableRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayRouteTableRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -54,16 +72,24 @@ pub struct InternetGatewayTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayVpcRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayVpcRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -73,13 +99,18 @@ pub struct InternetGatewayVpcRefFrom { /// InternetGatewayStatus defines the observed state of InternetGateway #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct 
InternetGatewayStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// Any VPCs attached to the internet gateway. #[serde(default, skip_serializing_if = "Option::is_none")] pub attachments: Option>, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The ID of the internet gateway. @@ -90,20 +121,31 @@ pub struct InternetGatewayStatus { pub owner_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Describes the attachment of a VPC to an internet gateway or an egress-only internet gateway. 
+/// Describes the attachment of a VPC to an internet gateway or an egress-only +/// internet gateway. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayStatusAttachments { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -112,7 +154,9 @@ pub struct InternetGatewayStatusAttachments { pub vpc_id: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct InternetGatewayStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/natgateways.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/natgateways.rs index e1d5adcf5..f1af61a04 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/natgateways.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/natgateways.rs @@ -5,62 +5,98 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// NatGatewaySpec defines the desired state of NatGateway. -/// Describes a NAT gateway. +/// NatGatewaySpec defines the desired state of NatGateway. +/// +/// +/// Describes a NAT gateway. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "NATGateway", plural = "natgateways")] #[kube(namespaced)] #[kube(status = "NATGatewayStatus")] #[kube(schema = "disabled")] pub struct NATGatewaySpec { - /// [Public NAT gateways only] The allocation ID of an Elastic IP address to associate with the NAT gateway. You cannot specify an Elastic IP address with a private NAT gateway. If the Elastic IP address is associated with another resource, you must first disassociate it. + /// [Public NAT gateways only] The allocation ID of an Elastic IP address to + /// associate with the NAT gateway. You cannot specify an Elastic IP address + /// with a private NAT gateway. If the Elastic IP address is associated with + /// another resource, you must first disassociate it. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocationID")] pub allocation_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocationRef")] pub allocation_ref: Option, - /// Indicates whether the NAT gateway supports public or private connectivity. The default is public connectivity. + /// Indicates whether the NAT gateway supports public or private connectivity. + /// The default is public connectivity. #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectivityType")] pub connectivity_type: Option, /// The subnet in which to create the NAT gateway. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetID")] pub subnet_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetRef")] pub subnet_ref: Option, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayAllocationRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayAllocationRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewaySubnetRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the 
identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewaySubnetRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -79,10 +115,15 @@ pub struct NATGatewayTags { /// NATGatewayStatus defines the observed state of NATGateway #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date and time the NAT gateway was created. @@ -91,33 +132,73 @@ pub struct NATGatewayStatus { /// The date and time the NAT gateway was deleted, if applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deleteTime")] pub delete_time: Option, - /// If the NAT gateway could not be created, specifies the error code for the failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound) + /// If the NAT gateway could not be created, specifies the error code for the + /// failure. (InsufficientFreeAddressesInSubnet | Gateway.NotAttached | InvalidAllocationID.NotFound + /// | Resource.AlreadyAssociated | InternalError | InvalidSubnetID.NotFound) #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureCode")] pub failure_code: Option, - /// If the NAT gateway could not be created, specifies the error message for the failure, that corresponds to the error code. - /// * For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses to create this NAT gateway" - /// * For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway attached" - /// * For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT gateway" - /// * For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx is already associated" - /// * For InternalError: "Network interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an invalid state. Please try again." - /// * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx does not exist or could not be found." + /// If the NAT gateway could not be created, specifies the error message for + /// the failure, that corresponds to the error code. 
+ /// + /// + /// * For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free + /// addresses to create this NAT gateway" + /// + /// + /// * For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway + /// attached" + /// + /// + /// * For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx + /// could not be associated with this NAT gateway" + /// + /// + /// * For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx + /// is already associated" + /// + /// + /// * For InternalError: "Network interface eni-xxxxxxxx, created and used + /// internally by this NAT gateway is in an invalid state. Please try again." + /// + /// + /// * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx + /// does not exist or could not be found." #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] pub failure_message: Option, - /// Information about the IP addresses and network interface associated with the NAT gateway. + /// Information about the IP addresses and network interface associated with + /// the NAT gateway. #[serde(default, skip_serializing_if = "Option::is_none", rename = "natGatewayAddresses")] pub nat_gateway_addresses: Option>, /// The ID of the NAT gateway. #[serde(default, skip_serializing_if = "Option::is_none", rename = "natGatewayID")] pub nat_gateway_id: Option, - /// Reserved. If you need to sustain traffic greater than the documented limits (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), contact us through the Support Center (https://console.aws.amazon.com/support/home?). + /// Reserved. If you need to sustain traffic greater than the documented limits + /// (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), + /// contact us through the Support Center (https://console.aws.amazon.com/support/home?). #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionedBandwidth")] pub provisioned_bandwidth: Option, - /// The state of the NAT gateway. - /// * pending: The NAT gateway is being created and is not ready to process traffic. - /// * failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason. - /// * available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway. - /// * deleting: The NAT gateway is in the process of being terminated and may still be processing traffic. - /// * deleted: The NAT gateway has been terminated and is no longer processing traffic. + /// The state of the NAT gateway. + /// + /// + /// * pending: The NAT gateway is being created and is not ready to process + /// traffic. + /// + /// + /// * failed: The NAT gateway could not be created. Check the failureCode + /// and failureMessage fields for the reason. + /// + /// + /// * available: The NAT gateway is able to process traffic. This status remains + /// until you delete the NAT gateway, and does not indicate the health of + /// the NAT gateway. + /// + /// + /// * deleting: The NAT gateway is in the process of being terminated and + /// may still be processing traffic. + /// + /// + /// * deleted: The NAT gateway has been terminated and is no longer processing + /// traffic. #[serde(default, skip_serializing_if = "Option::is_none")] pub state: Option, /// The ID of the VPC in which the NAT gateway is located. 
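The AWSResourceReferenceWrapper doc comments repeated through these hunks describe how the generated specs can point at sibling ACK resources by Kubernetes name (a `from:` / `name:` block) instead of hard-coded AWS IDs. As a rough illustration only, not part of this patch, a NATGateway built against these bindings might look like the sketch below; the crate/module path is inferred from the file layout in the diff, the `String` payloads behind the elided `Option` type parameters are assumed, and the resource names (`my-eip`, `my-subnet`, `my-nat-gateway`) are hypothetical.

    // Sketch only: assumes the module path matches the file layout in this patch
    // and that the elided Option type parameters on these fields are String.
    use kube_custom_resources_rs::ec2_services_k8s_aws::v1alpha1::natgateways::{
        NATGateway, NATGatewayAllocationRef, NATGatewayAllocationRefFrom, NATGatewaySpec,
        NATGatewaySubnetRef, NATGatewaySubnetRefFrom,
    };

    fn main() {
        let spec = NATGatewaySpec {
            // Public connectivity, so an Elastic IP allocation is referenced by the
            // name of its ElasticIPAddress CR rather than via allocationID.
            connectivity_type: Some("public".to_string()),
            allocation_ref: Some(NATGatewayAllocationRef {
                from: Some(NATGatewayAllocationRefFrom {
                    name: Some("my-eip".to_string()),
                }),
            }),
            // Likewise, the subnet is referenced by the name of a Subnet CR
            // instead of being pinned to a subnetID.
            subnet_ref: Some(NATGatewaySubnetRef {
                from: Some(NATGatewaySubnetRefFrom {
                    name: Some("my-subnet".to_string()),
                }),
            }),
            // All remaining optional fields stay unset.
            ..Default::default()
        };

        // The kube CustomResource derive on NATGatewaySpec generates this
        // constructor, which sets metadata.name and leaves status unset.
        let nat_gw = NATGateway::new("my-nat-gateway", spec);
        println!("{:?}", nat_gw);
    }

Choosing the `*Ref` wrappers over the raw `allocationID` / `subnetID` fields is the more user-friendly reference syntax the wrapper doc comments in this patch advertise; the raw ID fields remain available when the AWS identifiers are already known.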
@@ -125,20 +206,32 @@ pub struct NATGatewayStatus { pub vpc_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayStatusConditions { /// Last time the condition transitioned from one status to another. @@ -170,7 +263,9 @@ pub struct NATGatewayStatusNatGatewayAddresses { pub public_ip: Option, } -/// Reserved. If you need to sustain traffic greater than the documented limits (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), contact us through the Support Center (https://console.aws.amazon.com/support/home?). +/// Reserved. If you need to sustain traffic greater than the documented limits +/// (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), +/// contact us through the Support Center (https://console.aws.amazon.com/support/home?). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NATGatewayStatusProvisionedBandwidth { #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionTime")] diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/routetables.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/routetables.rs index b1172a8d6..bf42b9ae1 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/routetables.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/routetables.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// RouteTableSpec defines the desired state of RouteTable. -/// Describes a route table. +/// RouteTableSpec defines the desired state of RouteTable. +/// +/// +/// Describes a route table. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "RouteTable", plural = "routetables")] #[kube(namespaced)] @@ -15,14 +17,22 @@ use serde::{Serialize, Deserialize}; pub struct RouteTableSpec { #[serde(default, skip_serializing_if = "Option::is_none")] pub routes: Option>, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// The ID of the VPC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcID")] pub vpc_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcRef")] pub vpc_ref: Option, } @@ -77,12 +87,14 @@ pub struct RouteTableRoutes { /// Reference field for GatewayID #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesGatewayRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesGatewayRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -92,12 +104,14 @@ pub struct RouteTableRoutesGatewayRefFrom { /// Reference field for NATGatewayID #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesNatGatewayRef { - /// AWSResourceReference provides all the values 
necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesNatGatewayRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -107,12 +121,14 @@ pub struct RouteTableRoutesNatGatewayRefFrom { /// Reference field for TransitGatewayID #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesTransitGatewayRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesTransitGatewayRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -122,12 +138,14 @@ pub struct RouteTableRoutesTransitGatewayRefFrom { /// Reference field for VPCEndpointID #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesVpcEndpointRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesVpcEndpointRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -137,12 +155,14 @@ pub struct RouteTableRoutesVpcEndpointRefFrom { /// Reference field for VPCPeeringConnectionID #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesVpcPeeringConnectionRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// 
AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableRoutesVpcPeeringConnectionRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -158,16 +178,24 @@ pub struct RouteTableTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableVpcRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableVpcRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -177,13 +205,18 @@ pub struct RouteTableVpcRefFrom { /// RouteTableStatus defines the observed state of RouteTable #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// The associations between the route table and one or more subnets or a gateway. #[serde(default, skip_serializing_if = "Option::is_none")] pub associations: Option>, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The ID of the Amazon Web Services account that owns the route table. 
@@ -200,13 +233,23 @@ pub struct RouteTableStatus { pub route_table_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. @@ -216,7 +259,8 @@ pub struct RouteTableStatusAckResourceMetadata { /// Describes an association between a route table and a subnet or gateway. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableStatusAssociations { - /// Describes the state of an association between a route table and a subnet or gateway. + /// Describes the state of an association between a route table and a subnet + /// or gateway. #[serde(default, skip_serializing_if = "Option::is_none", rename = "associationState")] pub association_state: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatewayID")] @@ -231,7 +275,8 @@ pub struct RouteTableStatusAssociations { pub subnet_id: Option, } -/// Describes the state of an association between a route table and a subnet or gateway. +/// Describes the state of an association between a route table and a subnet +/// or gateway. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableStatusAssociationsAssociationState { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -240,7 +285,9 @@ pub struct RouteTableStatusAssociationsAssociationState { pub status_message: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RouteTableStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/securitygroups.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/securitygroups.rs index 8edf0d7e4..9eecac2cc 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/securitygroups.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/securitygroups.rs @@ -5,36 +5,58 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// SecurityGroupSpec defines the desired state of SecurityGroup. -/// Describes a security group. +/// SecurityGroupSpec defines the desired state of SecurityGroup. +/// +/// +/// Describes a security group. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "SecurityGroup", plural = "securitygroups")] #[kube(namespaced)] #[kube(status = "SecurityGroupStatus")] #[kube(schema = "disabled")] pub struct SecurityGroupSpec { - /// A description for the security group. This is informational only. - /// Constraints: Up to 255 characters in length - /// Constraints for EC2-Classic: ASCII characters - /// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + /// A description for the security group. This is informational only. + /// + /// + /// Constraints: Up to 255 characters in length + /// + /// + /// Constraints for EC2-Classic: ASCII characters + /// + /// + /// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* pub description: String, #[serde(default, skip_serializing_if = "Option::is_none", rename = "egressRules")] pub egress_rules: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressRules")] pub ingress_rules: Option>, - /// The name of the security group. - /// Constraints: Up to 255 characters in length. Cannot start with sg-. - /// Constraints for EC2-Classic: ASCII characters - /// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* + /// The name of the security group. + /// + /// + /// Constraints: Up to 255 characters in length. Cannot start with sg-. + /// + /// + /// Constraints for EC2-Classic: ASCII characters + /// + /// + /// Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* pub name: String, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// [EC2-VPC] The ID of the VPC. Required for EC2-VPC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcID")] pub vpc_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcRef")] pub vpc_ref: Option, } @@ -85,8 +107,13 @@ pub struct SecurityGroupEgressRulesPrefixListIDs { pub prefix_list_id: Option, } -/// Describes a security group and Amazon Web Services account ID pair. -/// We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) in the Amazon Elastic Compute Cloud User Guide. +/// Describes a security group and Amazon Web Services account ID pair. +/// +/// +/// We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate +/// from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic +/// to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) +/// in the Amazon Elastic Compute Cloud User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupEgressRulesUserIdGroupPairs { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -151,8 +178,13 @@ pub struct SecurityGroupIngressRulesPrefixListIDs { pub prefix_list_id: Option, } -/// Describes a security group and Amazon Web Services account ID pair. -/// We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) in the Amazon Elastic Compute Cloud User Guide. +/// Describes a security group and Amazon Web Services account ID pair. +/// +/// +/// We are retiring EC2-Classic on August 15, 2022. We recommend that you migrate +/// from EC2-Classic to a VPC. For more information, see Migrate from EC2-Classic +/// to a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-migrate.html) +/// in the Amazon Elastic Compute Cloud User Guide. 
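As the constraints above note, `description` and `name` are the only non-`Option` fields visible in `SecurityGroupSpec`; everything else can stay at its default. A rough sketch of building such a resource, assuming the crate exposes this file as `kube_custom_resources_rs::ec2_services_k8s_aws::v1alpha1::securitygroups` (mirroring the path in the diff header) and relying on the `SecurityGroup::new(name, spec)` constructor that kube's `CustomResource` derive generates:

use kube_custom_resources_rs::ec2_services_k8s_aws::v1alpha1::securitygroups::{
    SecurityGroup, SecurityGroupSpec,
};

fn build_security_group() -> SecurityGroup {
    // Only the required fields are set; egressRules, ingressRules, tags and
    // vpcID/vpcRef are optional and left at their defaults here.
    let spec = SecurityGroupSpec {
        description: "web tier ingress".to_string(), // informational only, up to 255 characters
        name: "web-sg".to_string(),                  // must not start with "sg-"
        ..Default::default()
    };
    SecurityGroup::new("web-sg", spec)
}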
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupIngressRulesUserIdGroupPairs { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -180,16 +212,24 @@ pub struct SecurityGroupTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupVpcRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupVpcRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -199,10 +239,15 @@ pub struct SecurityGroupVpcRefFrom { /// SecurityGroupStatus defines the observed state of SecurityGroup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The ID of the security group. @@ -213,20 +258,32 @@ pub struct SecurityGroupStatus { pub rules: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SecurityGroupStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/subnets.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/subnets.rs index 846e682ec..44171b84c 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/subnets.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/subnets.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// SubnetSpec defines the desired state of Subnet. -/// Describes a subnet. +/// SubnetSpec defines the desired state of Subnet. +/// +/// +/// Describes a subnet. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "Subnet", plural = "subnets")] #[kube(namespaced)] @@ -15,17 +17,33 @@ use serde::{Serialize, Deserialize}; pub struct SubnetSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "assignIPv6AddressOnCreation")] pub assign_i_pv6_address_on_creation: Option, - /// The Availability Zone or Local Zone for the subnet. - /// Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. - /// To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. 
For information about the Regions that support Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) in the Amazon Elastic Compute Cloud User Guide. - /// To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN. + /// The Availability Zone or Local Zone for the subnet. + /// + /// + /// Default: Amazon Web Services selects one for you. If you create more than + /// one subnet in your VPC, we do not necessarily select a different zone for + /// each subnet. + /// + /// + /// To create a subnet in a Local Zone, set this value to the Local Zone ID, + /// for example us-west-2-lax-1a. For information about the Regions that support + /// Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) + /// in the Amazon Elastic Compute Cloud User Guide. + /// + /// + /// To create a subnet in an Outpost, set this value to the Availability Zone + /// for the Outpost and specify the Outpost ARN. #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZone")] pub availability_zone: Option, /// The AZ ID or the Local Zone ID of the subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZoneID")] pub availability_zone_id: Option, - /// The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. - /// This parameter is not supported for an IPv6 only subnet. + /// The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + /// We modify the specified CIDR block to its canonical form; for example, if + /// you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. + /// + /// + /// This parameter is not supported for an IPv6 only subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "cidrBlock")] pub cidr_block: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "customerOwnedIPv4Pool")] @@ -38,8 +56,11 @@ pub struct SubnetSpec { pub enable_resource_name_dnsa_record: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostnameType")] pub hostname_type: Option, - /// The IPv6 network range for the subnet, in CIDR notation. The subnet size must use a /64 prefix length. - /// This parameter is required for an IPv6 only subnet. + /// The IPv6 network range for the subnet, in CIDR notation. The subnet size + /// must use a /64 prefix length. + /// + /// + /// This parameter is required for an IPv6 only subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6CIDRBlock")] pub ipv6_cidr_block: Option, /// Indicates whether to create an IPv6 only subnet. @@ -47,35 +68,52 @@ pub struct SubnetSpec { pub ipv6_native: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "mapPublicIPOnLaunch")] pub map_public_ip_on_launch: Option, - /// The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost ARN, you must also specify the Availability Zone of the Outpost subnet. + /// The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost + /// ARN, you must also specify the Availability Zone of the Outpost subnet. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "outpostARN")] pub outpost_arn: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeTableRefs")] pub route_table_refs: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeTables")] pub route_tables: Option>, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// The ID of the VPC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcID")] pub vpc_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcRef")] pub vpc_ref: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetRouteTableRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetRouteTableRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -91,16 +129,24 @@ pub struct SubnetTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetVpcRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource 
for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetVpcRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -110,31 +156,42 @@ pub struct SubnetVpcRefFrom { /// SubnetStatus defines the observed state of Subnet #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any stopped instances are considered unavailable. + /// The number of unused private IPv4 addresses in the subnet. The IPv4 addresses + /// for any stopped instances are considered unavailable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "availableIPAddressCount")] pub available_ip_address_count: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// Indicates whether this is the default subnet for the Availability Zone. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultForAZ")] pub default_for_az: Option, - /// Indicates the device position for local network interfaces in this subnet. For example, 1 indicates local network interfaces in this subnet are the secondary network interface (eth1). + /// Indicates the device position for local network interfaces in this subnet. + /// For example, 1 indicates local network interfaces in this subnet are the + /// secondary network interface (eth1). #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableLniAtDeviceIndex")] pub enable_lni_at_device_index: Option, /// Information about the IPv6 CIDR blocks associated with the subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6CIDRBlockAssociationSet")] pub ipv6_cidr_block_association_set: Option>, - /// Indicates whether a network interface created in this subnet (including a network interface created by RunInstances) receives a customer-owned IPv4 address. + /// Indicates whether a network interface created in this subnet (including a + /// network interface created by RunInstances) receives a customer-owned IPv4 + /// address. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "mapCustomerOwnedIPOnLaunch")] pub map_customer_owned_ip_on_launch: Option, /// The ID of the Amazon Web Services account that owns the subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerID")] pub owner_id: Option, - /// The type of hostnames to assign to instances in the subnet at launch. An instance hostname is based on the IPv4 address or ID of the instance. + /// The type of hostnames to assign to instances in the subnet at launch. An + /// instance hostname is based on the IPv4 address or ID of the instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateDNSNameOptionsOnLaunch")] pub private_dns_name_options_on_launch: Option, /// The current state of the subnet. @@ -145,20 +202,32 @@ pub struct SubnetStatus { pub subnet_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetStatusConditions { /// Last time the condition transitioned from one status to another. 
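Every `*AckResourceMetadata` struct touched by this patch uses the same serde pattern: `arn` is dropped from the output when `None`, while `ownerAccountID` (renamed from `owner_account_id`) and `region` are always serialized. A standalone sketch with a simplified stand-in struct (field types assumed to be `Option<String>` and `String`):

use serde::{Deserialize, Serialize};

// Simplified stand-in for the generated *AckResourceMetadata structs.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
struct AckResourceMetadata {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    arn: Option<String>,
    #[serde(rename = "ownerAccountID")]
    owner_account_id: String,
    region: String,
}

fn main() {
    let md = AckResourceMetadata {
        arn: None, // omitted from the serialized output entirely
        owner_account_id: "111122223333".to_string(),
        region: "us-east-1".to_string(),
    };
    // Prints: {"ownerAccountID":"111122223333","region":"us-east-1"}
    println!("{}", serde_json::to_string(&md).unwrap());
}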
@@ -198,7 +267,8 @@ pub struct SubnetStatusIpv6CidrBlockAssociationSetIpv6CidrBlockState { pub status_message: Option, } -/// The type of hostnames to assign to instances in the subnet at launch. An instance hostname is based on the IPv4 address or ID of the instance. +/// The type of hostnames to assign to instances in the subnet at launch. An +/// instance hostname is based on the IPv4 address or ID of the instance. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SubnetStatusPrivateDnsNameOptionsOnLaunch { #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableResourceNameDNSAAAARecord")] diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/transitgateways.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/transitgateways.rs index 39faf113e..d48d268e0 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/transitgateways.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/transitgateways.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// TransitGatewaySpec defines the desired state of TransitGateway. -/// Describes a transit gateway. +/// TransitGatewaySpec defines the desired state of TransitGateway. +/// +/// +/// Describes a transit gateway. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "TransitGateway", plural = "transitgateways")] #[kube(namespaced)] @@ -19,7 +21,9 @@ pub struct TransitGatewaySpec { /// The transit gateway options. #[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -57,10 +61,15 @@ pub struct TransitGatewayTags { /// TransitGatewayStatus defines the observed state of TransitGateway #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransitGatewayStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The creation time. 
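The rewrapped comments do not change how these types are consumed: `TransitGateway` is still an ordinary namespaced `CustomResource`, so it can be created through a regular kube `Api`. A rough sketch, assuming the crate is pulled in next to `kube` and `tokio`, that the module path mirrors the file path in this diff, and that `TransitGatewayTags` carries the usual optional `key`/`value` pair (its fields are not visible in this hunk):

use kube::{api::PostParams, Api, Client};
use kube_custom_resources_rs::ec2_services_k8s_aws::v1alpha1::transitgateways::{
    TransitGateway, TransitGatewaySpec, TransitGatewayTags,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::try_default().await?;
    let api: Api<TransitGateway> = Api::namespaced(client, "default");

    let spec = TransitGatewaySpec {
        // Per the ACK docs above, a tag value may be omitted and is then stored as "".
        tags: Some(vec![TransitGatewayTags {
            key: Some("team".to_string()),
            value: Some("networking".to_string()),
        }]),
        ..Default::default()
    };

    let tgw = TransitGateway::new("example-tgw", spec);
    api.create(&PostParams::default(), &tgw).await?;
    Ok(())
}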
@@ -77,20 +86,32 @@ pub struct TransitGatewayStatus { pub transit_gateway_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransitGatewayStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransitGatewayStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcendpoints.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcendpoints.rs index eebf45a14..6c5e3fbc6 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcendpoints.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcendpoints.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// VpcEndpointSpec defines the desired state of VpcEndpoint. -/// Describes a VPC endpoint. +/// VpcEndpointSpec defines the desired state of VpcEndpoint. +/// +/// +/// Describes a VPC endpoint. 
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "VPCEndpoint", plural = "vpcendpoints")] #[kube(namespaced)] @@ -19,12 +21,27 @@ pub struct VPCEndpointSpec { /// The IP address type for the endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddressType")] pub ip_address_type: Option, - /// (Interface and gateway endpoints) A policy to attach to the endpoint that controls access to the service. The policy must be in valid JSON format. If this parameter is not specified, we attach a default policy that allows full access to the service. + /// (Interface and gateway endpoints) A policy to attach to the endpoint that + /// controls access to the service. The policy must be in valid JSON format. + /// If this parameter is not specified, we attach a default policy that allows + /// full access to the service. #[serde(default, skip_serializing_if = "Option::is_none", rename = "policyDocument")] pub policy_document: Option, - /// (Interface endpoint) Indicates whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, kinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service. - /// To use a private hosted zone, you must set the following VPC attributes to true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to set the VPC attributes. - /// Default: true + /// (Interface endpoint) Indicates whether to associate a private hosted zone + /// with the specified VPC. The private hosted zone contains a record set for + /// the default public DNS name for the service for the Region (for example, + /// kinesis.us-east-1.amazonaws.com), which resolves to the private IP addresses + /// of the endpoint network interfaces in the VPC. This enables you to make requests + /// to the default public DNS name for the service instead of the public DNS + /// names that are automatically generated by the VPC endpoint service. + /// + /// + /// To use a private hosted zone, you must set the following VPC attributes to + /// true: enableDnsHostnames and enableDnsSupport. Use ModifyVpcAttribute to + /// set the VPC attributes. + /// + /// + /// Default: true #[serde(default, skip_serializing_if = "Option::is_none", rename = "privateDNSEnabled")] pub private_dns_enabled: Option, /// (Gateway endpoint) One or more route table IDs. @@ -32,31 +49,45 @@ pub struct VPCEndpointSpec { pub route_table_i_ds: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeTableRefs")] pub route_table_refs: Option>, - /// (Interface endpoint) The ID of one or more security groups to associate with the endpoint network interface. + /// (Interface endpoint) The ID of one or more security groups to associate with + /// the endpoint network interface. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] pub security_group_i_ds: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupRefs")] pub security_group_refs: Option>, - /// The service name. 
To get a list of available services, use the DescribeVpcEndpointServices request, or get the name from the service provider. + /// The service name. To get a list of available services, use the DescribeVpcEndpointServices + /// request, or get the name from the service provider. #[serde(rename = "serviceName")] pub service_name: String, - /// (Interface and Gateway Load Balancer endpoints) The ID of one or more subnets in which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you can specify one subnet only. + /// (Interface and Gateway Load Balancer endpoints) The ID of one or more subnets + /// in which to create an endpoint network interface. For a Gateway Load Balancer + /// endpoint, you can specify one subnet only. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetIDs")] pub subnet_i_ds: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetRefs")] pub subnet_refs: Option>, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The type of endpoint. - /// Default: Gateway + /// The type of endpoint. + /// + /// + /// Default: Gateway #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcEndpointType")] pub vpc_endpoint_type: Option, /// The ID of the VPC in which the endpoint will be used. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcID")] pub vpc_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcRef")] pub vpc_ref: Option, } @@ -68,48 +99,72 @@ pub struct VPCEndpointDnsOptions { pub dns_record_ip_type: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointRouteTableRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the 
identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointRouteTableRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointSecurityGroupRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointSecurityGroupRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointSubnetRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointSubnetRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -125,16 +180,24 @@ pub struct VPCEndpointTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointVpcRef { - /// AWSResourceReference provides all the values necessary to reference another 
k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointVpcRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -144,10 +207,15 @@ pub struct VPCEndpointVpcRefFrom { /// VPCEndpointStatus defines the observed state of VPCEndpoint #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date and time that the endpoint was created. @@ -156,7 +224,8 @@ pub struct VPCEndpointStatus { /// (Interface endpoint) The DNS entries for the endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsEntries")] pub dns_entries: Option>, - /// (Interface endpoint) Information about the security groups that are associated with the network interface. + /// (Interface endpoint) Information about the security groups that are associated + /// with the network interface. #[serde(default, skip_serializing_if = "Option::is_none")] pub groups: Option>, /// The last error that occurred for endpoint. @@ -179,20 +248,32 @@ pub struct VPCEndpointStatus { pub vpc_endpoint_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCEndpointStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcs.rs b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcs.rs index 8e51a3272..9915539d9 100644 --- a/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcs.rs +++ b/kube-custom-resources-rs/src/ec2_services_k8s_aws/v1alpha1/vpcs.rs @@ -5,15 +5,19 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// VpcSpec defines the desired state of Vpc. -/// Describes a VPC. +/// VpcSpec defines the desired state of Vpc. +/// +/// +/// Describes a VPC. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ec2.services.k8s.aws", version = "v1alpha1", kind = "VPC", plural = "vpcs")] #[kube(namespaced)] #[kube(status = "VPCStatus")] #[kube(schema = "disabled")] pub struct VPCSpec { - /// Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block. + /// Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for + /// the VPC. You cannot specify the range of IP addresses, or the size of the + /// CIDR block. #[serde(default, skip_serializing_if = "Option::is_none", rename = "amazonProvidedIPv6CIDRBlock")] pub amazon_provided_i_pv6_cidr_block: Option, #[serde(rename = "cidrBlocks")] @@ -24,35 +28,66 @@ pub struct VPCSpec { /// The attribute value. The valid values are true or false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableDNSSupport")] pub enable_dns_support: Option, - /// The tenancy options for instances launched into the VPC. For default, instances are launched with shared tenancy by default. You can launch instances with any tenancy into a shared tenancy VPC. 
For dedicated, instances are launched as dedicated tenancy instances by default. You can only launch instances with a tenancy of dedicated or host into a dedicated tenancy VPC. - /// Important: The host value cannot be used with this parameter. Use the default or dedicated values only. - /// Default: default + /// The tenancy options for instances launched into the VPC. For default, instances + /// are launched with shared tenancy by default. You can launch instances with + /// any tenancy into a shared tenancy VPC. For dedicated, instances are launched + /// as dedicated tenancy instances by default. You can only launch instances + /// with a tenancy of dedicated or host into a dedicated tenancy VPC. + /// + /// + /// Important: The host value cannot be used with this parameter. Use the default + /// or dedicated values only. + /// + /// + /// Default: default #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceTenancy")] pub instance_tenancy: Option, - /// The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide. + /// The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. + /// For more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) + /// in the Amazon VPC IPAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv4IPAMPoolID")] pub ipv4_ipam_pool_id: Option, - /// The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide. + /// The netmask length of the IPv4 CIDR you want to allocate to this VPC from + /// an Amazon VPC IP Address Manager (IPAM) pool. For more information about + /// IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) + /// in the Amazon VPC IPAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv4NetmaskLength")] pub ipv4_netmask_length: Option, - /// The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool in the request. - /// To let Amazon choose the IPv6 CIDR block for you, omit this parameter. + /// The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool + /// in the request. + /// + /// + /// To let Amazon choose the IPv6 CIDR block for you, omit this parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6CIDRBlock")] pub ipv6_cidr_block: Option, - /// The name of the location from which we advertise the IPV6 CIDR block. Use this parameter to limit the address to this location. - /// You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. + /// The name of the location from which we advertise the IPV6 CIDR block. Use + /// this parameter to limit the address to this location. + /// + /// + /// You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6CIDRBlockNetworkBorderGroup")] pub ipv6_cidr_block_network_border_group: Option, - /// The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. 
IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide. + /// The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 + /// CIDR. IPAM is a VPC feature that you can use to automate your IP address + /// management workflows including assigning, tracking, troubleshooting, and + /// auditing IP addresses across Amazon Web Services Regions and accounts throughout + /// your Amazon Web Services Organization. For more information, see What is + /// IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) + /// in the Amazon VPC IPAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6IPAMPoolID")] pub ipv6_ipam_pool_id: Option, - /// The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) in the Amazon VPC IPAM User Guide. + /// The netmask length of the IPv6 CIDR you want to allocate to this VPC from + /// an Amazon VPC IP Address Manager (IPAM) pool. For more information about + /// IPAM, see What is IPAM? (https://docs.aws.amazon.com/vpc/latest/ipam/what-is-it-ipam.html) + /// in the Amazon VPC IPAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6NetmaskLength")] pub ipv6_netmask_length: Option, /// The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6Pool")] pub ipv6_pool: Option, - /// The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string. + /// The tags. The value parameter is required, but if you don't want the tag + /// to have a value, specify the parameter with no value, and we set the value + /// to an empty string. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -69,13 +104,18 @@ pub struct VPCTags { /// VPCStatus defines the observed state of VPC #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// Information about the IPv4 CIDR blocks associated with the VPC. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "cidrBlockAssociationSet")] pub cidr_block_association_set: Option>, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The ID of the set of DHCP options you've associated with the VPC. @@ -98,13 +138,23 @@ pub struct VPCStatus { pub vpc_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. @@ -132,7 +182,9 @@ pub struct VPCStatusCidrBlockAssociationSetCidrBlockState { pub status_message: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VPCStatusConditions { /// Last time the condition transitioned from one status to another. 
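A minimal, illustrative usage sketch for the generated VPC type above, assuming a tokio/anyhow binary, a cluster reachable through the default kubeconfig, and that the elided generic parameters follow the usual ACK codegen (for example Option<bool> behind enableDNSSupport); the module path simply mirrors the file layout shown in this diff.

use kube::{Api, Client, api::PostParams};
use kube_custom_resources_rs::ec2_services_k8s_aws::v1alpha1::vpcs::{VPC, VPCSpec};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = Client::try_default().await?;
    let vpcs: Api<VPC> = Api::namespaced(client, "default");

    // VPCSpec derives Default, so optional fields can simply be left unset here.
    // A real object would also need its required fields (for example the list
    // behind the cidrBlocks rename) filled in with meaningful values.
    let vpc = VPC::new("sample-vpc", VPCSpec {
        enable_dns_support: Some(true), // assumed Option<bool>, per the "true or false" doc text
        ..Default::default()
    });

    let created = vpcs.create(&PostParams::default(), &vpc).await?;
    println!("created VPC CR {:?}", created.metadata.name);
    Ok(())
}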
diff --git a/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/pullthroughcacherules.rs b/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/pullthroughcacherules.rs index 38414c3c4..a5b5e1708 100644 --- a/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/pullthroughcacherules.rs +++ b/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/pullthroughcacherules.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// PullThroughCacheRuleSpec defines the desired state of PullThroughCacheRule. -/// The details of a pull through cache rule. +/// PullThroughCacheRuleSpec defines the desired state of PullThroughCacheRule. +/// +/// +/// The details of a pull through cache rule. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ecr.services.k8s.aws", version = "v1alpha1", kind = "PullThroughCacheRule", plural = "pullthroughcacherules")] #[kube(namespaced)] @@ -16,10 +18,13 @@ pub struct PullThroughCacheRuleSpec { /// The repository name prefix to use when caching images from the source registry. #[serde(rename = "ecrRepositoryPrefix")] pub ecr_repository_prefix: String, - /// The Amazon Web Services account ID associated with the registry to create the pull through cache rule for. If you do not specify a registry, the default registry is assumed. + /// The Amazon Web Services account ID associated with the registry to create + /// the pull through cache rule for. If you do not specify a registry, the default + /// registry is assumed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "registryID")] pub registry_id: Option, - /// The registry URL of the upstream public registry to use as the source for the pull through cache rule. + /// The registry URL of the upstream public registry to use as the source for + /// the pull through cache rule. #[serde(rename = "upstreamRegistryURL")] pub upstream_registry_url: String, } @@ -27,31 +32,49 @@ pub struct PullThroughCacheRuleSpec { /// PullThroughCacheRuleStatus defines the observed state of PullThroughCacheRule #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PullThroughCacheRuleStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The date and time, in JavaScript date format, when the pull through cache rule was created. + /// The date and time, in JavaScript date format, when the pull through cache + /// rule was created. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "createdAt")] pub created_at: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PullThroughCacheRuleStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PullThroughCacheRuleStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/repositories.rs b/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/repositories.rs index ccd88a974..06baa5497 100644 --- a/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/repositories.rs +++ b/kube-custom-resources-rs/src/ecr_services_k8s_aws/v1alpha1/repositories.rs @@ -5,40 +5,57 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// RepositorySpec defines the desired state of Repository. -/// An object representing a repository. +/// RepositorySpec defines the desired state of Repository. +/// +/// +/// An object representing a repository. 
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "ecr.services.k8s.aws", version = "v1alpha1", kind = "Repository", plural = "repositories")] #[kube(namespaced)] #[kube(status = "RepositoryStatus")] #[kube(schema = "disabled")] pub struct RepositorySpec { - /// The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest. + /// The encryption configuration for the repository. This determines how the + /// contents of your repository are encrypted at rest. #[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionConfiguration")] pub encryption_configuration: Option, - /// The image scanning configuration for the repository. This determines whether images are scanned for known vulnerabilities after being pushed to the repository. + /// The image scanning configuration for the repository. This determines whether + /// images are scanned for known vulnerabilities after being pushed to the repository. #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageScanningConfiguration")] pub image_scanning_configuration: Option, - /// The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten. + /// The tag mutability setting for the repository. If this parameter is omitted, + /// the default setting of MUTABLE will be used which will allow image tags to + /// be overwritten. If IMMUTABLE is specified, all image tags within the repository + /// will be immutable which will prevent them from being overwritten. #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageTagMutability")] pub image_tag_mutability: Option, /// The JSON repository policy text to apply to the repository. #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecyclePolicy")] pub lifecycle_policy: Option, - /// The name to use for the repository. The repository name may be specified on its own (such as nginx-web-app) or it can be prepended with a namespace to group the repository into a category (such as project-a/nginx-web-app). + /// The name to use for the repository. The repository name may be specified + /// on its own (such as nginx-web-app) or it can be prepended with a namespace + /// to group the repository into a category (such as project-a/nginx-web-app). pub name: String, - /// The JSON repository policy text to apply to the repository. For more information, see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) in the Amazon Elastic Container Registry User Guide. + /// The JSON repository policy text to apply to the repository. For more information, + /// see Amazon ECR repository policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + /// in the Amazon Elastic Container Registry User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub policy: Option, - /// The Amazon Web Services account ID associated with the registry to create the repository. If you do not specify a registry, the default registry is assumed. + /// The Amazon Web Services account ID associated with the registry to create + /// the repository. 
If you do not specify a registry, the default registry is + /// assumed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "registryID")] pub registry_id: Option, - /// The metadata that you apply to the repository to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. + /// The metadata that you apply to the repository to help you categorize and + /// organize them. Each tag consists of a key and an optional value, both of + /// which you define. Tag keys can have a maximum character length of 128 characters, + /// and tag values can have a maximum length of 256 characters. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest. +/// The encryption configuration for the repository. This determines how the +/// contents of your repository are encrypted at rest. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryEncryptionConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionType")] @@ -47,14 +64,18 @@ pub struct RepositoryEncryptionConfiguration { pub kms_key: Option, } -/// The image scanning configuration for the repository. This determines whether images are scanned for known vulnerabilities after being pushed to the repository. +/// The image scanning configuration for the repository. This determines whether +/// images are scanned for known vulnerabilities after being pushed to the repository. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryImageScanningConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "scanOnPush")] pub scan_on_push: Option, } -/// The metadata to apply to a resource to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. +/// The metadata to apply to a resource to help you categorize and organize them. +/// Each tag consists of a key and a value, both of which you define. Tag keys +/// can have a maximum character length of 128 characters, and tag values can +/// have a maximum length of 256 characters. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -66,34 +87,52 @@ pub struct RepositoryTags { /// RepositoryStatus defines the observed state of Repository #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date and time, in JavaScript date format, when the repository was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdAt")] pub created_at: Option, - /// The URI for the repository. You can use this URI for container image push and pull operations. + /// The URI for the repository. You can use this URI for container image push + /// and pull operations. #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryURI")] pub repository_uri: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RepositoryStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/jobruns.rs b/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/jobruns.rs index 2e43fc486..86ff5655d 100644 --- a/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/jobruns.rs +++ b/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/jobruns.rs @@ -6,8 +6,12 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// JobRunSpec defines the desired state of JobRun. -/// This entity describes a job run. A job run is a unit of work, such as a Spark jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on EKS. +/// JobRunSpec defines the desired state of JobRun. +/// +/// +/// This entity describes a job run. A job run is a unit of work, such as a Spark +/// jar, PySpark script, or SparkSQL query, that you submit to Amazon EMR on +/// EKS. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "emrcontainers.services.k8s.aws", version = "v1alpha1", kind = "JobRun", plural = "jobruns")] #[kube(namespaced)] @@ -34,8 +38,14 @@ pub struct JobRunSpec { /// The virtual cluster ID for which the job run request is submitted. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "virtualClusterID")] pub virtual_cluster_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "virtualClusterRef")] pub virtual_cluster_ref: Option, } @@ -59,16 +69,24 @@ pub struct JobRunJobDriverSparkSubmitJobDriver { pub spark_submit_parameters: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct JobRunVirtualClusterRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct JobRunVirtualClusterRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -78,10 +96,15 @@ pub struct JobRunVirtualClusterRefFrom { /// JobRunStatus defines the observed state of JobRun #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct JobRunStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// This output displays the started job run ID. 
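The AWSResourceReferenceWrapper comment above describes the 'from:' / 'name:' reference syntax; the following is a small sketch of building such a reference in Rust, assuming the generated JobRunVirtualClusterRefFrom struct exposes an optional name field matching that shape and that the remaining JobRunSpec fields can stay at their Default values.

use kube_custom_resources_rs::emrcontainers_services_k8s_aws::v1alpha1::jobruns::{
    JobRun, JobRunSpec, JobRunVirtualClusterRef, JobRunVirtualClusterRefFrom,
};

// Reference the EMR virtual cluster by the name of another ACK-managed resource
// instead of hard-coding virtualClusterID.
fn sample_job_run() -> JobRun {
    let spec = JobRunSpec {
        virtual_cluster_ref: Some(JobRunVirtualClusterRef {
            from: Some(JobRunVirtualClusterRefFrom {
                name: Some("my-virtual-cluster".to_string()), // assumed Option<String>
                ..Default::default()
            }),
        }),
        ..Default::default()
    };
    JobRun::new("sample-job-run", spec)
}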
@@ -92,20 +115,32 @@ pub struct JobRunStatus { pub state: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct JobRunStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct JobRunStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/virtualclusters.rs b/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/virtualclusters.rs index 2e6623d36..429494e88 100644 --- a/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/virtualclusters.rs +++ b/kube-custom-resources-rs/src/emrcontainers_services_k8s_aws/v1alpha1/virtualclusters.rs @@ -6,8 +6,16 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// VirtualClusterSpec defines the desired state of VirtualCluster. -/// This entity describes a virtual cluster. A virtual cluster is a Kubernetes namespace that Amazon EMR is registered with. Amazon EMR uses virtual clusters to run jobs and host endpoints. Multiple virtual clusters can be backed by the same physical cluster. 
However, each virtual cluster maps to one namespace on an EKS cluster. Virtual clusters do not create any active resources that contribute to your bill or that require lifecycle management outside the service. +/// VirtualClusterSpec defines the desired state of VirtualCluster. +/// +/// +/// This entity describes a virtual cluster. A virtual cluster is a Kubernetes +/// namespace that Amazon EMR is registered with. Amazon EMR uses virtual clusters +/// to run jobs and host endpoints. Multiple virtual clusters can be backed by +/// the same physical cluster. However, each virtual cluster maps to one namespace +/// on an EKS cluster. Virtual clusters do not create any active resources that +/// contribute to your bill or that require lifecycle management outside the +/// service. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "emrcontainers.services.k8s.aws", version = "v1alpha1", kind = "VirtualCluster", plural = "virtualclusters")] #[kube(namespaced)] @@ -54,10 +62,15 @@ pub struct VirtualClusterContainerProviderInfoEksInfo { /// VirtualClusterStatus defines the observed state of VirtualCluster #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VirtualClusterStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// This output contains the virtual cluster ID. @@ -65,20 +78,32 @@ pub struct VirtualClusterStatus { pub id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VirtualClusterStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct VirtualClusterStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/external_secrets_io/v1beta1/clustersecretstores.rs b/kube-custom-resources-rs/src/external_secrets_io/v1beta1/clustersecretstores.rs index f2f522a96..a7a127bcd 100644 --- a/kube-custom-resources-rs/src/external_secrets_io/v1beta1/clustersecretstores.rs +++ b/kube-custom-resources-rs/src/external_secrets_io/v1beta1/clustersecretstores.rs @@ -86,6 +86,9 @@ pub struct ClusterSecretStoreProvider { /// AzureKV configures this store to sync secrets using Azure Key Vault provider #[serde(default, skip_serializing_if = "Option::is_none")] pub azurekv: Option, + /// Chef configures this store to sync secrets with chef server + #[serde(default, skip_serializing_if = "Option::is_none")] + pub chef: Option, /// Conjur configures this store to sync secrets using conjur provider #[serde(default, skip_serializing_if = "Option::is_none")] pub conjur: Option, @@ -120,6 +123,9 @@ pub struct ClusterSecretStoreProvider { /// Oracle configures this store to sync secrets using Oracle Vault provider #[serde(default, skip_serializing_if = "Option::is_none")] pub oracle: Option, + /// Pulumi configures this store to sync secrets using the Pulumi provider + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pulumi: Option, /// Scaleway #[serde(default, skip_serializing_if = "Option::is_none")] pub scaleway: Option, @@ -682,6 +688,50 @@ pub struct ClusterSecretStoreProviderAzurekvServiceAccountRef { pub namespace: Option, } +/// Chef configures this store to sync secrets with chef server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderChef { + /// Auth defines the information necessary to authenticate against chef Server + pub auth: ClusterSecretStoreProviderChefAuth, + /// ServerURL is the chef server URL used to connect to. 
If using orgs you should include your org in the url and terminate the url with a "/" + #[serde(rename = "serverUrl")] + pub server_url: String, + /// UserName should be the user ID on the chef server + pub username: String, +} + +/// Auth defines the information necessary to authenticate against chef Server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderChefAuth { + /// ChefAuthSecretRef holds secret references for chef server login credentials. + #[serde(rename = "secretRef")] + pub secret_ref: ClusterSecretStoreProviderChefAuthSecretRef, +} + +/// ChefAuthSecretRef holds secret references for chef server login credentials. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderChefAuthSecretRef { + /// SecretKey is the Signing Key in PEM format, used for authentication. + #[serde(rename = "privateKeySecretRef")] + pub private_key_secret_ref: ClusterSecretStoreProviderChefAuthSecretRefPrivateKeySecretRef, +} + +/// SecretKey is the Signing Key in PEM format, used for authentication. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderChefAuthSecretRefPrivateKeySecretRef { + /// The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + /// defaulted, in others it may be required. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// The name of the Secret resource being referred to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + /// to the namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, +} + /// Conjur configures this store to sync secrets using conjur provider #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterSecretStoreProviderConjur { @@ -1524,6 +1574,49 @@ pub struct ClusterSecretStoreProviderOracleServiceAccountRef { pub namespace: Option, } +/// Pulumi configures this store to sync secrets using the Pulumi provider +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderPulumi { + /// AccessToken is the access tokens to sign in to the Pulumi Cloud Console. + #[serde(rename = "accessToken")] + pub access_token: ClusterSecretStoreProviderPulumiAccessToken, + /// APIURL is the URL of the Pulumi API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiUrl")] + pub api_url: Option, + /// Environment are YAML documents composed of static key-value pairs, programmatic expressions, + /// dynamically retrieved values from supported providers including all major clouds, + /// and other Pulumi ESC environments. + /// To create a new environment, visit https://www.pulumi.com/docs/esc/environments/ for more information. + pub environment: String, + /// Organization are a space to collaborate on shared projects and stacks. + /// To create a new organization, visit https://app.pulumi.com/ and click "New Organization". + pub organization: String, +} + +/// AccessToken is the access tokens to sign in to the Pulumi Cloud Console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderPulumiAccessToken { + /// SecretRef is a reference to a secret containing the Pulumi API token. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] + pub secret_ref: Option, +} + +/// SecretRef is a reference to a secret containing the Pulumi API token. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ClusterSecretStoreProviderPulumiAccessTokenSecretRef { + /// The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + /// defaulted, in others it may be required. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// The name of the Secret resource being referred to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + /// to the namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, +} + /// Scaleway #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterSecretStoreProviderScaleway { diff --git a/kube-custom-resources-rs/src/external_secrets_io/v1beta1/secretstores.rs b/kube-custom-resources-rs/src/external_secrets_io/v1beta1/secretstores.rs index a061e13e3..b2b323f5a 100644 --- a/kube-custom-resources-rs/src/external_secrets_io/v1beta1/secretstores.rs +++ b/kube-custom-resources-rs/src/external_secrets_io/v1beta1/secretstores.rs @@ -87,6 +87,9 @@ pub struct SecretStoreProvider { /// AzureKV configures this store to sync secrets using Azure Key Vault provider #[serde(default, skip_serializing_if = "Option::is_none")] pub azurekv: Option, + /// Chef configures this store to sync secrets with chef server + #[serde(default, skip_serializing_if = "Option::is_none")] + pub chef: Option, /// Conjur configures this store to sync secrets using conjur provider #[serde(default, skip_serializing_if = "Option::is_none")] pub conjur: Option, @@ -121,6 +124,9 @@ pub struct SecretStoreProvider { /// Oracle configures this store to sync secrets using Oracle Vault provider #[serde(default, skip_serializing_if = "Option::is_none")] pub oracle: Option, + /// Pulumi configures this store to sync secrets using the Pulumi provider + #[serde(default, skip_serializing_if = "Option::is_none")] + pub pulumi: Option, /// Scaleway #[serde(default, skip_serializing_if = "Option::is_none")] pub scaleway: Option, @@ -683,6 +689,50 @@ pub struct SecretStoreProviderAzurekvServiceAccountRef { pub namespace: Option, } +/// Chef configures this store to sync secrets with chef server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderChef { + /// Auth defines the information necessary to authenticate against chef Server + pub auth: SecretStoreProviderChefAuth, + /// ServerURL is the chef server URL used to connect to. If using orgs you should include your org in the url and terminate the url with a "/" + #[serde(rename = "serverUrl")] + pub server_url: String, + /// UserName should be the user ID on the chef server + pub username: String, +} + +/// Auth defines the information necessary to authenticate against chef Server +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderChefAuth { + /// ChefAuthSecretRef holds secret references for chef server login credentials. + #[serde(rename = "secretRef")] + pub secret_ref: SecretStoreProviderChefAuthSecretRef, +} + +/// ChefAuthSecretRef holds secret references for chef server login credentials. 
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderChefAuthSecretRef { + /// SecretKey is the Signing Key in PEM format, used for authentication. + #[serde(rename = "privateKeySecretRef")] + pub private_key_secret_ref: SecretStoreProviderChefAuthSecretRefPrivateKeySecretRef, +} + +/// SecretKey is the Signing Key in PEM format, used for authentication. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderChefAuthSecretRefPrivateKeySecretRef { + /// The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + /// defaulted, in others it may be required. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// The name of the Secret resource being referred to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults + /// to the namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, +} + /// Conjur configures this store to sync secrets using conjur provider #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct SecretStoreProviderConjur { @@ -1525,6 +1575,49 @@ pub struct SecretStoreProviderOracleServiceAccountRef { pub namespace: Option, } +/// Pulumi configures this store to sync secrets using the Pulumi provider +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderPulumi { + /// AccessToken is the access tokens to sign in to the Pulumi Cloud Console. + #[serde(rename = "accessToken")] + pub access_token: SecretStoreProviderPulumiAccessToken, + /// APIURL is the URL of the Pulumi API. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiUrl")] + pub api_url: Option, + /// Environment are YAML documents composed of static key-value pairs, programmatic expressions, + /// dynamically retrieved values from supported providers including all major clouds, + /// and other Pulumi ESC environments. + /// To create a new environment, visit https://www.pulumi.com/docs/esc/environments/ for more information. + pub environment: String, + /// Organization are a space to collaborate on shared projects and stacks. + /// To create a new organization, visit https://app.pulumi.com/ and click "New Organization". + pub organization: String, +} + +/// AccessToken is the access tokens to sign in to the Pulumi Cloud Console. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderPulumiAccessToken { + /// SecretRef is a reference to a secret containing the Pulumi API token. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] + pub secret_ref: Option, +} + +/// SecretRef is a reference to a secret containing the Pulumi API token. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct SecretStoreProviderPulumiAccessTokenSecretRef { + /// The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be + /// defaulted, in others it may be required. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub key: Option, + /// The name of the Secret resource being referred to. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. 
cluster-scoped defaults + /// to the namespace of the referent. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, +} + /// Scaleway #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct SecretStoreProviderScaleway { diff --git a/kube-custom-resources-rs/src/flows_netobserv_io/v1alpha1/flowcollectors.rs b/kube-custom-resources-rs/src/flows_netobserv_io/v1alpha1/flowcollectors.rs index aee3dba9b..983d0b070 100644 --- a/kube-custom-resources-rs/src/flows_netobserv_io/v1alpha1/flowcollectors.rs +++ b/kube-custom-resources-rs/src/flows_netobserv_io/v1alpha1/flowcollectors.rs @@ -73,7 +73,7 @@ pub struct FlowCollectorAgentEbpf { /// interfaces contains the interface names from where flows will be collected. If empty, the agent will fetch all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. If an entry is enclosed by slashes (such as `/br-/`), it will match as regular expression, otherwise it will be matched as a case-sensitive string. #[serde(default, skip_serializing_if = "Option::is_none")] pub interfaces: Option>, - /// kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB. + /// kafkaBatchSize limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaBatchSize")] pub kafka_batch_size: Option, /// logLevel defines the log level for the NetObserv eBPF Agent diff --git a/kube-custom-resources-rs/src/flows_netobserv_io/v1beta1/flowcollectors.rs b/kube-custom-resources-rs/src/flows_netobserv_io/v1beta1/flowcollectors.rs index e1c7a6efc..a2063384a 100644 --- a/kube-custom-resources-rs/src/flows_netobserv_io/v1beta1/flowcollectors.rs +++ b/kube-custom-resources-rs/src/flows_netobserv_io/v1beta1/flowcollectors.rs @@ -62,7 +62,7 @@ pub struct FlowCollectorAgentEbpf { /// `cacheMaxFlows` is the max number of flows in an aggregate; when reached, the reporter sends the flows. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. #[serde(default, skip_serializing_if = "Option::is_none", rename = "cacheMaxFlows")] pub cache_max_flows: Option, - /// `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. + /// `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[serde(default, skip_serializing_if = "Option::is_none")] pub debug: Option, /// `excludeInterfaces` contains the interface names that are excluded from flow tracing. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string. @@ -77,13 +77,13 @@ pub struct FlowCollectorAgentEbpf { /// `interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. 
An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string. #[serde(default, skip_serializing_if = "Option::is_none")] pub interfaces: Option>, - /// `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB. + /// `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaBatchSize")] pub kafka_batch_size: Option, /// `logLevel` defines the log level for the NetObserv eBPF Agent #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] pub log_level: Option, - /// Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`). + /// Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, /// `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -94,7 +94,7 @@ pub struct FlowCollectorAgentEbpf { pub sampling: Option, } -/// `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. +/// `debug` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorAgentEbpfDebug { /// `env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. @@ -212,7 +212,7 @@ pub struct FlowCollectorConsolePlugin { /// `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment. #[serde(default, skip_serializing_if = "Option::is_none")] pub autoscaler: Option, - /// Enables the console plugin deployment. `spec.Loki.enable` must also be `true` + /// Enables the console plugin deployment. 
`spec.loki.enable` must also be `true` #[serde(default, skip_serializing_if = "Option::is_none")] pub enable: Option, /// `imagePullPolicy` is the Kubernetes pull policy for the image defined above @@ -1230,7 +1230,7 @@ pub struct FlowCollectorProcessor { /// `conversationTerminatingTimeout` is the time to wait from detected FIN flag to end a conversation. Only relevant for TCP flows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "conversationTerminatingTimeout")] pub conversation_terminating_timeout: Option, - /// `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. + /// `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[serde(default, skip_serializing_if = "Option::is_none")] pub debug: Option, /// `dropUnusedFields` allows, when set to `true`, to drop fields that are known to be unused by OVS, to save storage space. @@ -1280,7 +1280,7 @@ pub struct FlowCollectorProcessor { pub resources: Option, } -/// `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. +/// `debug` allows setting some aspects of the internal configuration of the flow processor. This section is aimed exclusively for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorProcessorDebug { /// `env` allows passing custom environment variables to underlying components. Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. diff --git a/kube-custom-resources-rs/src/flows_netobserv_io/v1beta2/flowcollectors.rs b/kube-custom-resources-rs/src/flows_netobserv_io/v1beta2/flowcollectors.rs index 6bdb783c6..d158475df 100644 --- a/kube-custom-resources-rs/src/flows_netobserv_io/v1beta2/flowcollectors.rs +++ b/kube-custom-resources-rs/src/flows_netobserv_io/v1beta2/flowcollectors.rs @@ -56,7 +56,7 @@ pub struct FlowCollectorAgent { /// `ebpf` describes the settings related to the eBPF-based flow reporter when `spec.agent.type` is set to `eBPF`. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorAgentEbpf { - /// `advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. + /// `advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub advanced: Option, /// `cacheActiveTimeout` is the max period during which the reporter aggregates flows before sending. Increasing `cacheMaxFlows` and `cacheActiveTimeout` can decrease the network traffic overhead and the CPU load, however you can expect higher memory consumption and an increased latency in the flow collection. @@ -77,13 +77,13 @@ pub struct FlowCollectorAgentEbpf { /// `interfaces` contains the interface names from where flows are collected. If empty, the agent fetches all the interfaces in the system, excepting the ones listed in ExcludeInterfaces. An entry enclosed by slashes, such as `/br-/`, is matched as a regular expression. Otherwise it is matched as a case-sensitive string. #[serde(default, skip_serializing_if = "Option::is_none")] pub interfaces: Option>, - /// `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 10MB. + /// `kafkaBatchSize` limits the maximum size of a request in bytes before being sent to a partition. Ignored when not using Kafka. Default: 1MB. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kafkaBatchSize")] pub kafka_batch_size: Option, /// `logLevel` defines the log level for the NetObserv eBPF Agent #[serde(default, skip_serializing_if = "Option::is_none", rename = "logLevel")] pub log_level: Option, - /// Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`). + /// Privileged mode for the eBPF Agent container. When ignored or set to `false`, the operator sets granular capabilities (BPF, PERFMON, NET_ADMIN, SYS_RESOURCE) to the container. If for some reason these capabilities cannot be set, such as if an old kernel version not knowing CAP_BPF is in use, then you can turn on this mode for more global privileges. Some agent features require the privileged mode, such as packet drops tracking (see `features`) and SR-IOV support. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, /// `resources` are the compute resources required by this container. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -94,7 +94,7 @@ pub struct FlowCollectorAgentEbpf { pub sampling: Option, } -/// `advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. +/// `advanced` allows setting some aspects of the internal configuration of the eBPF agent. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorAgentEbpfAdvanced { /// `env` allows passing custom environment variables to underlying components. 
Useful for passing some very concrete performance-tuning options, such as `GOGC` and `GOMAXPROCS`, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. @@ -209,13 +209,13 @@ pub enum FlowCollectorAgentType { /// `consolePlugin` defines the settings related to the OpenShift Console plugin, when available. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorConsolePlugin { - /// `advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. + /// `advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[serde(default, skip_serializing_if = "Option::is_none")] pub advanced: Option, /// `autoscaler` spec of a horizontal pod autoscaler to set up for the plugin Deployment. #[serde(default, skip_serializing_if = "Option::is_none")] pub autoscaler: Option, - /// Enables the console plugin deployment. `spec.Loki.enable` must also be `true` + /// Enables the console plugin deployment. `spec.loki.enable` must also be `true` #[serde(default, skip_serializing_if = "Option::is_none")] pub enable: Option, /// `imagePullPolicy` is the Kubernetes pull policy for the image defined above @@ -238,7 +238,7 @@ pub struct FlowCollectorConsolePlugin { pub resources: Option, } -/// `advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. +/// `advanced` allows setting some aspects of the internal configuration of the console plugin. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorConsolePluginAdvanced { /// `args` allows passing custom arguments to underlying components. Useful for overriding some parameters, such as an url or a configuration path, that should not be publicly exposed as part of the FlowCollector descriptor, as they are only useful in edge debug or support scenarios. @@ -1012,19 +1012,19 @@ pub struct FlowCollectorLoki { /// Set `enable` to `true` to store flows in Loki. It is required for the OpenShift Console plugin installation. #[serde(default, skip_serializing_if = "Option::is_none")] pub enable: Option, - /// Loki configuration for "LokiStack" mode. This is useful for an easy loki-operator configuration. It is ignored for other modes. + /// Loki configuration for `LokiStack` mode. This is useful for an easy loki-operator configuration. It is ignored for other modes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "lokiStack")] pub loki_stack: Option, - /// Loki configuration for "Manual" mode. This is the most flexible configuration. It is ignored for other modes. + /// Loki configuration for `Manual` mode. This is the most flexible configuration. It is ignored for other modes. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub manual: Option, - /// Loki configuration for "Microservices" mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes. + /// Loki configuration for `Microservices` mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes. #[serde(default, skip_serializing_if = "Option::is_none")] pub microservices: Option, - /// `mode` must be set according to the installation mode of Loki:
- Use "LokiStack" when Loki is managed using the Loki Operator
- Use "Monolithic" when Loki is installed as a monolithic workload
- Use "Microservices" when Loki is installed as microservices, but without Loki Operator
- Use "Manual" if none of the options above match your setup
+ /// `mode` must be set according to the installation mode of Loki:
- Use `LokiStack` when Loki is managed using the Loki Operator
- Use `Monolithic` when Loki is installed as a monolithic workload
- Use `Microservices` when Loki is installed as microservices, but without Loki Operator
- Use `Manual` if none of the options above match your setup
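As a hedged illustration of the four modes listed above: a minimal sketch of how downstream code might branch on the generated `FlowCollectorLokiMode` enum. The variant names and the module path (`kube_custom_resources_rs::flows_netobserv_io::v1beta2::flowcollectors`) are assumed from the doc text and the file layout shown in this patch, not confirmed by it.

// Hypothetical usage sketch: assumes the kopium-generated enum has one variant per documented mode.
use kube_custom_resources_rs::flows_netobserv_io::v1beta2::flowcollectors::FlowCollectorLokiMode;

// Maps a configured Loki mode to the only per-mode section of `spec.loki` that is read for it;
// the other per-mode sections are ignored, as the doc comments above state.
fn relevant_loki_section(mode: &FlowCollectorLokiMode) -> &'static str {
    match mode {
        FlowCollectorLokiMode::LokiStack => "spec.loki.lokiStack",
        FlowCollectorLokiMode::Monolithic => "spec.loki.monolithic",
        FlowCollectorLokiMode::Microservices => "spec.loki.microservices",
        FlowCollectorLokiMode::Manual => "spec.loki.manual",
    }
}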
#[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// Loki configuration for "Monolithic" mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes. + /// Loki configuration for `Monolithic` mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes. #[serde(default, skip_serializing_if = "Option::is_none")] pub monolithic: Option, /// `readTimeout` is the maximum console plugin loki query total time limit. A timeout of zero means no timeout. @@ -1058,7 +1058,7 @@ pub struct FlowCollectorLokiAdvanced { pub write_min_backoff: Option, } -/// Loki configuration for "LokiStack" mode. This is useful for an easy loki-operator configuration. It is ignored for other modes. +/// Loki configuration for `LokiStack` mode. This is useful for an easy loki-operator configuration. It is ignored for other modes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorLokiLokiStack { /// Name of an existing LokiStack resource to use. @@ -1069,7 +1069,7 @@ pub struct FlowCollectorLokiLokiStack { pub namespace: Option, } -/// Loki configuration for "Manual" mode. This is the most flexible configuration. It is ignored for other modes. +/// Loki configuration for `Manual` mode. This is the most flexible configuration. It is ignored for other modes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorLokiManual { /// `authToken` describes the way to get a token to authenticate to Loki.
- `Disabled` does not send any token with the request.
- `Forward` forwards the user token for authorization.
- `Host` [deprecated (*)] - uses the local pod service account to authenticate to Loki.
When using the Loki Operator, this must be set to `Forward`. @@ -1095,7 +1095,7 @@ pub struct FlowCollectorLokiManual { pub tls: Option, } -/// Loki configuration for "Manual" mode. This is the most flexible configuration. It is ignored for other modes. +/// Loki configuration for `Manual` mode. This is the most flexible configuration. It is ignored for other modes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum FlowCollectorLokiManualAuthToken { Disabled, @@ -1253,7 +1253,7 @@ pub enum FlowCollectorLokiManualTlsUserCertType { Secret, } -/// Loki configuration for "Microservices" mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes. +/// Loki configuration for `Microservices` mode. Use this option when Loki is installed using the microservices deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode). It is ignored for other modes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorLokiMicroservices { /// `ingesterUrl` is the address of an existing Loki ingester service to push the flows to. @@ -1354,7 +1354,7 @@ pub enum FlowCollectorLokiMode { Microservices, } -/// Loki configuration for "Monolithic" mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes. +/// Loki configuration for `Monolithic` mode. Use this option when Loki is installed using the monolithic deployment mode (https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#monolithic-mode). It is ignored for other modes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorLokiMonolithic { /// `tenantID` is the Loki `X-Scope-OrgID` header that identifies the tenant for each request. @@ -1449,7 +1449,7 @@ pub struct FlowCollectorProcessor { /// `addZone` allows availability zone awareness by labelling flows with their source and destination zones. This feature requires the "topology.kubernetes.io/zone" label to be set on nodes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "addZone")] pub add_zone: Option, - /// `advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Users setting its values do it at their own risk. + /// `advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[serde(default, skip_serializing_if = "Option::is_none")] pub advanced: Option, /// `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using OpenShift, leave empty to make it automatically determined. @@ -1487,7 +1487,7 @@ pub struct FlowCollectorProcessor { pub resources: Option, } -/// `advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. 
Users setting its values do it at their own risk. +/// `advanced` allows setting some aspects of the internal configuration of the flow processor. This section is aimed mostly for debugging and fine-grained performance optimizations, such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct FlowCollectorProcessorAdvanced { /// `conversationEndTimeout` is the time to wait after a network flow is received, to consider the conversation ended. This delay is ignored when a FIN packet is collected for TCP flows (see `conversationTerminatingTimeout` instead). diff --git a/kube-custom-resources-rs/src/forklift_konveyor_io/v1beta1/hosts.rs b/kube-custom-resources-rs/src/forklift_konveyor_io/v1beta1/hosts.rs index 841156b1c..236c91314 100644 --- a/kube-custom-resources-rs/src/forklift_konveyor_io/v1beta1/hosts.rs +++ b/kube-custom-resources-rs/src/forklift_konveyor_io/v1beta1/hosts.rs @@ -28,9 +28,6 @@ pub struct HostSpec { pub provider: HostProvider, /// Credentials. pub secret: HostSecret, - /// Certificate SHA-1 fingerprint, called thumbprint by VMware. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub thumbprint: Option, /// Type used to qualify the name. #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] pub r#type: Option, diff --git a/kube-custom-resources-rs/src/hazelcast_com/v1alpha1/managementcenters.rs b/kube-custom-resources-rs/src/hazelcast_com/v1alpha1/managementcenters.rs index 0fcf84ad1..9f6d331d8 100644 --- a/kube-custom-resources-rs/src/hazelcast_com/v1alpha1/managementcenters.rs +++ b/kube-custom-resources-rs/src/hazelcast_com/v1alpha1/managementcenters.rs @@ -86,6 +86,9 @@ pub struct ManagementCenterExternalConnectivityIngress { /// IngressClassName of the ingress object. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ingressClassName")] pub ingress_class_name: Option, + /// Path of the ingress rule. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, } /// OpenShift Route configuration of Management Center diff --git a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeployments.rs b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeployments.rs index a14180901..cdc096429 100644 --- a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeployments.rs +++ b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeployments.rs @@ -322,9 +322,6 @@ pub struct ClusterDeploymentPlatform { /// AgentBareMetal is the configuration used when performing an Assisted Agent based installation to bare metal. #[serde(default, skip_serializing_if = "Option::is_none", rename = "agentBareMetal")] pub agent_bare_metal: Option, - /// AlibabaCloud is the configuration used when installing on Alibaba Cloud - #[serde(default, skip_serializing_if = "Option::is_none")] - pub alibabacloud: Option, /// AWS is the configuration used when installing on AWS. #[serde(default, skip_serializing_if = "Option::is_none")] pub aws: Option, @@ -385,24 +382,6 @@ pub struct ClusterDeploymentPlatformAgentBareMetalAgentSelectorMatchExpressions pub values: Option>, } -/// AlibabaCloud is the configuration used when installing on Alibaba Cloud -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterDeploymentPlatformAlibabacloud { - /// CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials. 
- #[serde(rename = "credentialsSecretRef")] - pub credentials_secret_ref: ClusterDeploymentPlatformAlibabacloudCredentialsSecretRef, - /// Region specifies the Alibaba Cloud region where the cluster will be created. - pub region: String, -} - -/// CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials. -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterDeploymentPlatformAlibabacloudCredentialsSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? - #[serde(default, skip_serializing_if = "Option::is_none")] - pub name: Option, -} - /// AWS is the configuration used when installing on AWS. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ClusterDeploymentPlatformAws { diff --git a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeprovisions.rs b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeprovisions.rs index 3137de10c..1a9480688 100644 --- a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeprovisions.rs +++ b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterdeprovisions.rs @@ -32,9 +32,6 @@ pub struct ClusterDeprovisionSpec { /// Platform contains platform-specific configuration for a ClusterDeprovision #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ClusterDeprovisionPlatform { - /// AlibabaCloud contains Alibaba Cloud specific deprovision settings - #[serde(default, skip_serializing_if = "Option::is_none")] - pub alibabacloud: Option, /// AWS contains AWS-specific deprovision settings #[serde(default, skip_serializing_if = "Option::is_none")] pub aws: Option, @@ -58,27 +55,6 @@ pub struct ClusterDeprovisionPlatform { pub vsphere: Option, } -/// AlibabaCloud contains Alibaba Cloud specific deprovision settings -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterDeprovisionPlatformAlibabacloud { - /// BaseDomain is the DNS base domain. TODO: Use the non-platform-specific BaseDomain field. - #[serde(rename = "baseDomain")] - pub base_domain: String, - /// CredentialsSecretRef is the Alibaba account credentials to use for deprovisioning the cluster - #[serde(rename = "credentialsSecretRef")] - pub credentials_secret_ref: ClusterDeprovisionPlatformAlibabacloudCredentialsSecretRef, - /// Region is the Alibaba region for this deprovision - pub region: String, -} - -/// CredentialsSecretRef is the Alibaba account credentials to use for deprovisioning the cluster -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterDeprovisionPlatformAlibabacloudCredentialsSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? 
- #[serde(default, skip_serializing_if = "Option::is_none")] - pub name: Option, -} - /// AWS contains AWS-specific deprovision settings #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ClusterDeprovisionPlatformAws { diff --git a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterpools.rs b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterpools.rs index 344babcac..1716bba89 100644 --- a/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterpools.rs +++ b/kube-custom-resources-rs/src/hive_openshift_io/v1/clusterpools.rs @@ -123,9 +123,6 @@ pub struct ClusterPoolPlatform { /// AgentBareMetal is the configuration used when performing an Assisted Agent based installation to bare metal. #[serde(default, skip_serializing_if = "Option::is_none", rename = "agentBareMetal")] pub agent_bare_metal: Option, - /// AlibabaCloud is the configuration used when installing on Alibaba Cloud - #[serde(default, skip_serializing_if = "Option::is_none")] - pub alibabacloud: Option, /// AWS is the configuration used when installing on AWS. #[serde(default, skip_serializing_if = "Option::is_none")] pub aws: Option, @@ -186,24 +183,6 @@ pub struct ClusterPoolPlatformAgentBareMetalAgentSelectorMatchExpressions { pub values: Option>, } -/// AlibabaCloud is the configuration used when installing on Alibaba Cloud -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterPoolPlatformAlibabacloud { - /// CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials. - #[serde(rename = "credentialsSecretRef")] - pub credentials_secret_ref: ClusterPoolPlatformAlibabacloudCredentialsSecretRef, - /// Region specifies the Alibaba Cloud region where the cluster will be created. - pub region: String, -} - -/// CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access credentials. -#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct ClusterPoolPlatformAlibabacloudCredentialsSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? - #[serde(default, skip_serializing_if = "Option::is_none")] - pub name: Option, -} - /// AWS is the configuration used when installing on AWS. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ClusterPoolPlatformAws { diff --git a/kube-custom-resources-rs/src/hive_openshift_io/v1/machinepools.rs b/kube-custom-resources-rs/src/hive_openshift_io/v1/machinepools.rs index 7788979ad..33161d9fe 100644 --- a/kube-custom-resources-rs/src/hive_openshift_io/v1/machinepools.rs +++ b/kube-custom-resources-rs/src/hive_openshift_io/v1/machinepools.rs @@ -56,9 +56,6 @@ pub struct MachinePoolClusterDeploymentRef { /// Platform is configuration for machine pool specific to the platform. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MachinePoolPlatform { - /// AlibabaCloud is the configuration used when installing on Alibaba Cloud. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub alibabacloud: Option, /// AWS is the configuration used when installing on AWS. #[serde(default, skip_serializing_if = "Option::is_none")] pub aws: Option, @@ -82,37 +79,6 @@ pub struct MachinePoolPlatform { pub vsphere: Option, } -/// AlibabaCloud is the configuration used when installing on Alibaba Cloud. 
-#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] -pub struct MachinePoolPlatformAlibabacloud { - /// ImageID is the Image ID that should be used to create ECS instance. If set, the ImageID should belong to the same region as the cluster. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageID")] - pub image_id: Option, - /// InstanceType defines the ECS instance type. eg. ecs.g6.large - #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] - pub instance_type: Option, - /// SystemDiskCategory defines the category of the system disk. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "systemDiskCategory")] - pub system_disk_category: Option, - /// SystemDiskSize defines the size of the system disk in gibibytes (GiB). - #[serde(default, skip_serializing_if = "Option::is_none", rename = "systemDiskSize")] - pub system_disk_size: Option, - /// Zones is list of availability zones that can be used. eg. ["cn-hangzhou-i", "cn-hangzhou-h", "cn-hangzhou-j"] - #[serde(default, skip_serializing_if = "Option::is_none")] - pub zones: Option>, -} - -/// AlibabaCloud is the configuration used when installing on Alibaba Cloud. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub enum MachinePoolPlatformAlibabacloudSystemDiskCategory { - #[serde(rename = "")] - KopiumEmpty, - #[serde(rename = "cloud_efficiency")] - CloudEfficiency, - #[serde(rename = "cloud_essd")] - CloudEssd, -} - /// AWS is the configuration used when installing on AWS. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MachinePoolPlatformAws { diff --git a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/groups.rs b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/groups.rs index b62808ec7..80bcec986 100644 --- a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/groups.rs +++ b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/groups.rs @@ -6,12 +6,22 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// GroupSpec defines the desired state of Group. -/// Contains information about an IAM group entity. -/// This data type is used as a response element in the following operations: -/// * CreateGroup -/// * GetGroup -/// * ListGroups +/// GroupSpec defines the desired state of Group. +/// +/// +/// Contains information about an IAM group entity. +/// +/// +/// This data type is used as a response element in the following operations: +/// +/// +/// * CreateGroup +/// +/// +/// * GetGroup +/// +/// +/// * ListGroups #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "iam.services.k8s.aws", version = "v1alpha1", kind = "Group", plural = "groups")] #[kube(namespaced)] @@ -20,12 +30,28 @@ use std::collections::BTreeMap; pub struct GroupSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "inlinePolicies")] pub inline_policies: Option>, - /// The name of the group to create. Do not include the path in this value. - /// IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both "MyResource" and "myresource". + /// The name of the group to create. Do not include the path in this value. + /// + /// + /// IAM user, group, role, and policy names must be unique within the account. + /// Names are not distinguished by case. 
For example, you cannot create resources + /// named both "MyResource" and "myresource". pub name: String, - /// The path to the group. For more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. - /// This parameter is optional. If it is not included, it defaults to a slash (/). - /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\u0021) through the DEL character (\u007F), including most punctuation characters, digits, and upper and lowercased letters. + /// The path to the group. For more information about paths, see IAM identifiers + /// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. + /// + /// + /// This parameter is optional. If it is not included, it defaults to a slash + /// (/). + /// + /// + /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + /// a string of characters consisting of either a forward slash (/) by itself + /// or a string that must begin and end with forward slashes. In addition, it + /// can contain any ASCII character from the ! (\u0021) through the DEL character + /// (\u007F), including most punctuation characters, digits, and upper and lowercased + /// letters. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -34,16 +60,24 @@ pub struct GroupSpec { pub policy_refs: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GroupPolicyRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GroupPolicyRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -53,34 +87,54 @@ pub struct GroupPolicyRefsFrom { /// GroupStatus defines the observed state of Group #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account 
ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the group was created. + /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + /// when the group was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "createDate")] pub create_date: Option, - /// The stable and unique string identifying the group. For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. + /// The stable and unique string identifying the group. For more information + /// about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "groupID")] pub group_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. 
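As a hedged aside on the common ACK status block documented above: a minimal sketch of reading the constructed ARN back out of a reconciled Group. The module path and the `Option<String>` type of `arn` are assumptions based on the file layout and the field docs, not taken from this patch.

// Hypothetical helper: returns the ARN the ACK controller recorded for this Group,
// if `status.ackResourceMetadata.arn` has been populated.
use kube_custom_resources_rs::iam_services_k8s_aws::v1alpha1::groups::GroupStatus;

fn group_arn(status: &GroupStatus) -> Option<&str> {
    status.ack_resource_metadata.as_ref()?.arn.as_deref()
}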
#[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GroupStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/policies.rs b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/policies.rs index 25a4a8f14..853a9d2da 100644 --- a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/policies.rs +++ b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/policies.rs @@ -5,48 +5,118 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// PolicySpec defines the desired state of Policy. -/// Contains information about a managed policy. -/// This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations. -/// For more information about managed policies, refer to Managed policies and inline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) in the IAM User Guide. +/// PolicySpec defines the desired state of Policy. +/// +/// +/// Contains information about a managed policy. +/// +/// +/// This data type is used as a response element in the CreatePolicy, GetPolicy, +/// and ListPolicies operations. +/// +/// +/// For more information about managed policies, refer to Managed policies and +/// inline policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +/// in the IAM User Guide. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "iam.services.k8s.aws", version = "v1alpha1", kind = "Policy", plural = "policies")] #[kube(namespaced)] #[kube(status = "PolicyStatus")] #[kube(schema = "disabled")] pub struct PolicySpec { - /// A friendly description of the policy. - /// Typically used to store information about the permissions defined in the policy. For example, "Grants access to production DynamoDB tables." - /// The policy description is immutable. After a value is assigned, it cannot be changed. + /// A friendly description of the policy. + /// + /// + /// Typically used to store information about the permissions defined in the + /// policy. For example, "Grants access to production DynamoDB tables." + /// + /// + /// The policy description is immutable. After a value is assigned, it cannot + /// be changed. #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option, - /// The friendly name of the policy. - /// IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both "MyResource" and "myresource". + /// The friendly name of the policy. + /// + /// + /// IAM user, group, role, and policy names must be unique within the account. + /// Names are not distinguished by case. For example, you cannot create resources + /// named both "MyResource" and "myresource". pub name: String, - /// The path for the policy. 
- /// For more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. - /// This parameter is optional. If it is not included, it defaults to a slash (/). - /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\u0021) through the DEL character (\u007F), including most punctuation characters, digits, and upper and lowercased letters. - /// You cannot use an asterisk (*) in the path name. + /// The path for the policy. + /// + /// + /// For more information about paths, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. + /// + /// + /// This parameter is optional. If it is not included, it defaults to a slash + /// (/). + /// + /// + /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + /// a string of characters consisting of either a forward slash (/) by itself + /// or a string that must begin and end with forward slashes. In addition, it + /// can contain any ASCII character from the ! (\u0021) through the DEL character + /// (\u007F), including most punctuation characters, digits, and upper and lowercased + /// letters. + /// + /// + /// You cannot use an asterisk (*) in the path name. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// The JSON policy document that you want to use as the content for the new policy. - /// You must provide policies in JSON format in IAM. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM. - /// The maximum length of the policy document that you can pass in this operation, including whitespace, is listed below. To view the maximum character counts of a managed policy with no whitespaces, see IAM and STS character quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entity-length). - /// To learn more about JSON policy grammar, see Grammar of the IAM JSON policy language (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html) in the IAM User Guide. - /// The regex pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of characters consisting of the following: - /// * Any printable ASCII character ranging from the space character (\u0020) through the end of the ASCII character range - /// * The printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF) - /// * The special characters tab (\u0009), line feed (\u000A), and carriage return (\u000D) + /// The JSON policy document that you want to use as the content for the new + /// policy. + /// + /// + /// You must provide policies in JSON format in IAM. However, for CloudFormation + /// templates formatted in YAML, you can provide the policy in JSON or YAML format. + /// CloudFormation always converts a YAML policy to JSON format before submitting + /// it to IAM. + /// + /// + /// The maximum length of the policy document that you can pass in this operation, + /// including whitespace, is listed below. 
To view the maximum character counts + /// of a managed policy with no whitespaces, see IAM and STS character quotas + /// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entity-length). + /// + /// + /// To learn more about JSON policy grammar, see Grammar of the IAM JSON policy + /// language (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_grammar.html) + /// in the IAM User Guide. + /// + /// + /// The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + /// parameter is a string of characters consisting of the following: + /// + /// + /// * Any printable ASCII character ranging from the space character (\u0020) + /// through the end of the ASCII character range + /// + /// + /// * The printable characters in the Basic Latin and Latin-1 Supplement character + /// set (through \u00FF) + /// + /// + /// * The special characters tab (\u0009), line feed (\u000A), and carriage + /// return (\u000D) #[serde(rename = "policyDocument")] pub policy_document: String, - /// A list of tags that you want to attach to the new IAM customer managed policy. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. - /// If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created. + /// A list of tags that you want to attach to the new IAM customer managed policy. + /// Each tag consists of a key name and an associated value. For more information + /// about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + /// in the IAM User Guide. + /// + /// + /// If any one of the tags is invalid or if you exceed the allowed maximum number + /// of tags, then the entire request fails and the resource is not created. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. +/// A structure that represents user-provided metadata that can be associated +/// with an IAM resource. For more information about tagging, see Tagging IAM +/// resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +/// in the IAM User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PolicyTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -58,16 +128,23 @@ pub struct PolicyTags { /// PolicyStatus defines the observed state of Policy #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PolicyStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// The number of entities (users, groups, and roles) that the policy is attached to. 
+ /// The number of entities (users, groups, and roles) that the policy is attached + /// to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "attachmentCount")] pub attachment_count: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the policy was created. + /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + /// when the policy was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "createDate")] pub create_date: Option, /// The identifier for the version of the policy that is set as the default version. @@ -76,34 +153,60 @@ pub struct PolicyStatus { /// Specifies whether the policy can be attached to an IAM user, group, or role. #[serde(default, skip_serializing_if = "Option::is_none", rename = "isAttachable")] pub is_attachable: Option, - /// The number of entities (users and roles) for which the policy is used to set the permissions boundary. - /// For more information about permissions boundaries, see Permissions boundaries for IAM identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) in the IAM User Guide. + /// The number of entities (users and roles) for which the policy is used to + /// set the permissions boundary. + /// + /// + /// For more information about permissions boundaries, see Permissions boundaries + /// for IAM identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + /// in the IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "permissionsBoundaryUsageCount")] pub permissions_boundary_usage_count: Option, - /// The stable and unique string identifying the policy. - /// For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. + /// The stable and unique string identifying the policy. + /// + /// + /// For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "policyID")] pub policy_id: Option, - /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the policy was last updated. - /// When a policy has only one version, this field contains the date and time when the policy was created. When a policy has more than one version, this field contains the date and time when the most recent policy version was created. + /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + /// when the policy was last updated. + /// + /// + /// When a policy has only one version, this field contains the date and time + /// when the policy was created. When a policy has more than one version, this + /// field contains the date and time when the most recent policy version was + /// created. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "updateDate")] pub update_date: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PolicyStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct PolicyStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/roles.rs b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/roles.rs index 2192fe5c0..cb0e33e95 100644 --- a/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/roles.rs +++ b/kube-custom-resources-rs/src/iam_services_k8s_aws/v1alpha1/roles.rs @@ -6,21 +6,44 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// RoleSpec defines the desired state of Role. -/// Contains information about an IAM role. This structure is returned as a response element in several API operations that interact with roles. +/// RoleSpec defines the desired state of Role. +/// +/// +/// Contains information about an IAM role. This structure is returned as a response +/// element in several API operations that interact with roles. 
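As a hedged illustration of the RoleSpec generated just below: a minimal sketch that sets only its two required fields, `name` and `assumeRolePolicyDocument`, and leaves the rest to the derived Default. The module path is assumed from the file layout; the trust policy is a generic EC2 example, not taken from this patch.

// Hypothetical construction sketch: only the required fields are set explicitly;
// all optional fields fall back to the derived Default of the generated struct.
use kube_custom_resources_rs::iam_services_k8s_aws::v1alpha1::roles::RoleSpec;
use serde_json::json;

fn example_role_spec() -> RoleSpec {
    let trust_policy = json!({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": { "Service": "ec2.amazonaws.com" },
            "Action": "sts:AssumeRole"
        }]
    });
    RoleSpec {
        name: "example-role".to_string(),
        assume_role_policy_document: trust_policy.to_string(),
        ..Default::default()
    }
}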
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "iam.services.k8s.aws", version = "v1alpha1", kind = "Role", plural = "roles")] #[kube(namespaced)] #[kube(status = "RoleStatus")] #[kube(schema = "disabled")] pub struct RoleSpec { - /// The trust relationship policy document that grants an entity permission to assume the role. - /// In IAM, you must provide a JSON policy that has been converted to a string. However, for CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM. - /// The regex pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of characters consisting of the following: - /// * Any printable ASCII character ranging from the space character (\u0020) through the end of the ASCII character range - /// * The printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF) - /// * The special characters tab (\u0009), line feed (\u000A), and carriage return (\u000D) - /// Upon success, the response includes the same trust policy in JSON format. + /// The trust relationship policy document that grants an entity permission to + /// assume the role. + /// + /// + /// In IAM, you must provide a JSON policy that has been converted to a string. + /// However, for CloudFormation templates formatted in YAML, you can provide + /// the policy in JSON or YAML format. CloudFormation always converts a YAML + /// policy to JSON format before submitting it to IAM. + /// + /// + /// The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + /// parameter is a string of characters consisting of the following: + /// + /// + /// * Any printable ASCII character ranging from the space character (\u0020) + /// through the end of the ASCII character range + /// + /// + /// * The printable characters in the Basic Latin and Latin-1 Supplement character + /// set (through \u00FF) + /// + /// + /// * The special characters tab (\u0009), line feed (\u000A), and carriage + /// return (\u000D) + /// + /// + /// Upon success, the response includes the same trust policy in JSON format. #[serde(rename = "assumeRolePolicyDocument")] pub assume_role_policy_document: String, /// A description of the role. @@ -28,71 +51,146 @@ pub struct RoleSpec { pub description: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inlinePolicies")] pub inline_policies: Option>, - /// The maximum session duration (in seconds) that you want to set for the specified role. If you do not specify a value for this setting, the default value of one hour is applied. This setting can have a value from 1 hour to 12 hours. - /// Anyone who assumes the role from the CLI or API can use the DurationSeconds API parameter or the duration-seconds CLI parameter to request a longer session. The MaxSessionDuration setting determines the maximum duration that can be requested using the DurationSeconds parameter. If users don't specify a value for the DurationSeconds parameter, their security credentials are valid for one hour by default. This applies when you use the AssumeRole* API operations or the assume-role* CLI operations but does not apply when you use those operations to create a console URL. For more information, see Using IAM roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM User Guide. 
+ /// The maximum session duration (in seconds) that you want to set for the specified + /// role. If you do not specify a value for this setting, the default value of + /// one hour is applied. This setting can have a value from 1 hour to 12 hours. + /// + /// + /// Anyone who assumes the role from the CLI or API can use the DurationSeconds + /// API parameter or the duration-seconds CLI parameter to request a longer session. + /// The MaxSessionDuration setting determines the maximum duration that can be + /// requested using the DurationSeconds parameter. If users don't specify a value + /// for the DurationSeconds parameter, their security credentials are valid for + /// one hour by default. This applies when you use the AssumeRole* API operations + /// or the assume-role* CLI operations but does not apply when you use those + /// operations to create a console URL. For more information, see Using IAM roles + /// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the + /// IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxSessionDuration")] pub max_session_duration: Option, - /// The name of the role to create. - /// IAM user, group, role, and policy names must be unique within the account. Names are not distinguished by case. For example, you cannot create resources named both "MyResource" and "myresource". - /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: _+=,.@- + /// The name of the role to create. + /// + /// + /// IAM user, group, role, and policy names must be unique within the account. + /// Names are not distinguished by case. For example, you cannot create resources + /// named both "MyResource" and "myresource". + /// + /// + /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + /// a string of characters consisting of upper and lowercase alphanumeric characters + /// with no spaces. You can also include any of the following characters: _+=,.@- pub name: String, - /// The path to the role. For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. - /// This parameter is optional. If it is not included, it defaults to a slash (/). - /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\u0021) through the DEL character (\u007F), including most punctuation characters, digits, and upper and lowercased letters. + /// The path to the role. For more information about paths, see IAM Identifiers + /// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. + /// + /// + /// This parameter is optional. If it is not included, it defaults to a slash + /// (/). + /// + /// + /// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + /// a string of characters consisting of either a forward slash (/) by itself + /// or a string that must begin and end with forward slashes. In addition, it + /// can contain any ASCII character from the ! 
(\u0021) through the DEL character + /// (\u007F), including most punctuation characters, digits, and upper and lowercased + /// letters. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// The ARN of the managed policy that is used to set the permissions boundary for the role. - /// A permissions boundary policy defines the maximum permissions that identity-based policies can grant to an entity, but does not grant permissions. Permissions boundaries do not define the maximum permissions that a resource-based policy can grant to an entity. To learn more, see Permissions boundaries for IAM entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) in the IAM User Guide. - /// For more information about policy types, see Policy types (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) in the IAM User Guide. + /// The ARN of the managed policy that is used to set the permissions boundary + /// for the role. + /// + /// + /// A permissions boundary policy defines the maximum permissions that identity-based + /// policies can grant to an entity, but does not grant permissions. Permissions + /// boundaries do not define the maximum permissions that a resource-based policy + /// can grant to an entity. To learn more, see Permissions boundaries for IAM + /// entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + /// in the IAM User Guide. + /// + /// + /// For more information about policy types, see Policy types (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policy-types) + /// in the IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "permissionsBoundary")] pub permissions_boundary: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "permissionsBoundaryRef")] pub permissions_boundary_ref: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub policies: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "policyRefs")] pub policy_refs: Option>, - /// A list of tags that you want to attach to the new role. Each tag consists of a key name and an associated value. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. - /// If any one of the tags is invalid or if you exceed the allowed maximum number of tags, then the entire request fails and the resource is not created. + /// A list of tags that you want to attach to the new role. Each tag consists + /// of a key name and an associated value. For more information about tagging, + /// see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + /// in the IAM User Guide. + /// + /// + /// If any one of the tags is invalid or if you exceed the allowed maximum number + /// of tags, then the entire request fails and the resource is not created. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RolePermissionsBoundaryRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RolePermissionsBoundaryRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RolePolicyRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RolePolicyRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User Guide. +/// A structure that represents user-provided metadata that can be associated +/// with an IAM resource. For more information about tagging, see Tagging IAM +/// resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +/// in the IAM User Guide. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RoleTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -104,37 +202,63 @@ pub struct RoleTags { /// RoleStatus defines the observed state of Role #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RoleStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), when the role was created. + /// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + /// when the role was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "createDate")] pub create_date: Option, - /// The stable and unique string identifying the role. For more information about IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) in the IAM User Guide. + /// The stable and unique string identifying the role. For more information about + /// IDs, see IAM identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + /// in the IAM User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleID")] pub role_id: Option, - /// Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) in the IAM user Guide. + /// Contains information about the last time that an IAM role was used. This + /// includes the date and time and the Region in which the role was last used. + /// Activity is only reported for the trailing 400 days. This period can be shorter + /// if your Region began supporting these features within the last year. The + /// role might have been used more than 400 days ago. For more information, see + /// Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) + /// in the IAM user Guide. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "roleLastUsed")] pub role_last_used: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RoleStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RoleStatusConditions { /// Last time the condition transitioned from one status to another. @@ -153,7 +277,13 @@ pub struct RoleStatusConditions { pub r#type: String, } -/// Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) in the IAM user Guide. +/// Contains information about the last time that an IAM role was used. This +/// includes the date and time and the Region in which the role was last used. +/// Activity is only reported for the trailing 400 days. 
This period can be shorter +/// if your Region began supporting these features within the last year. The +/// role might have been used more than 400 days ago. For more information, see +/// Regions where data is tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) +/// in the IAM user Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RoleStatusRoleLastUsed { #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastUsedDate")] diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/mod.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/mod.rs index e2e30750a..ae34f8fa6 100644 --- a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/mod.rs +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/mod.rs @@ -1,3 +1,5 @@ pub mod v1alpha1; +pub mod v1alpha3; +pub mod v1alpha4; pub mod v1beta1; pub mod v1beta2; diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/mod.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/mod.rs new file mode 100644 index 000000000..e47dec285 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/mod.rs @@ -0,0 +1,7 @@ +pub mod vsphereclusteridentities; +pub mod vsphereclusters; +pub mod vspheredeploymentzones; +pub mod vspherefailuredomains; +pub mod vspheremachines; +pub mod vspheremachinetemplates; +pub mod vspherevms; diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusteridentities.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusteridentities.rs new file mode 100644 index 000000000..fc2ec3aa7 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusteridentities.rs @@ -0,0 +1,83 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusteridentities.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereClusterIdentity", plural = "vsphereclusteridentities")] +#[kube(status = "VSphereClusterIdentityStatus")] +#[kube(schema = "disabled")] +pub struct VSphereClusterIdentitySpec { + /// AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. If this object is nil, no namespaces will be allowed + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedNamespaces")] + pub allowed_namespaces: Option, + /// SecretName references a Secret inside the controller namespace with the credentials to use + #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")] + pub secret_name: Option, +} + +/// AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. 
If this object is nil, no namespaces will be allowed +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespaces { + /// Selector is a standard Kubernetes LabelSelector. A label query over a set of resources. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option, +} + +/// Selector is a standard Kubernetes LabelSelector. A label query over a set of resources. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespacesSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespacesSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityStatus { + /// Conditions defines current service state of the VSphereCluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. 
The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusters.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusters.rs new file mode 100644 index 000000000..623a640db --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vsphereclusters.rs @@ -0,0 +1,319 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vsphereclusters.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereClusterSpec defines the desired state of VSphereCluster. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereCluster", plural = "vsphereclusters")] +#[kube(namespaced)] +#[kube(status = "VSphereClusterStatus")] +#[kube(schema = "disabled")] +pub struct VSphereClusterSpec { + /// CloudProviderConfiguration holds the cluster-wide configuration for the vSphere cloud provider. + /// Deprecated: will be removed in v1alpha4. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudProviderConfiguration")] + pub cloud_provider_configuration: Option, + /// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneEndpoint")] + pub control_plane_endpoint: Option, + /// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "identityRef")] + pub identity_ref: Option, + /// Insecure is a flag that controls whether to validate the vSphere server's certificate. + /// Deprecated: will be removed in v1alpha4. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub insecure: Option, + /// LoadBalancerRef may be used to enable a control plane load balancer for this cluster. When a LoadBalancerRef is provided, the VSphereCluster.Status.Ready field will not be true until the referenced resource is Status.Ready and has a non-empty Status.Address value. + /// Deprecated: will be removed in v1alpha4. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "loadBalancerRef")] + pub load_balancer_ref: Option, + /// Server is the address of the vSphere endpoint. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When provided, Insecure should not be set to true + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// CloudProviderConfiguration holds the cluster-wide configuration for the vSphere cloud provider. +/// Deprecated: will be removed in v1alpha4. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfiguration { + /// Disk is the vSphere cloud provider's disk configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disk: Option, + /// Global is the vSphere cloud provider's global configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub global: Option, + /// Labels is the vSphere cloud provider's zone and region configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option, + /// Network is the vSphere cloud provider's network configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option, + /// CPIProviderConfig contains extra information used to configure the vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerConfig")] + pub provider_config: Option, + /// VCenter is a list of vCenter configurations. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "virtualCenter")] + pub virtual_center: Option>, + /// Workspace is the vSphere cloud provider's workspace configuration. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub workspace: Option, +} + +/// Disk is the vSphere cloud provider's disk configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfigurationDisk { + /// SCSIControllerType defines SCSI controller to be used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "scsiControllerType")] + pub scsi_controller_type: Option, +} + +/// Global is the vSphere cloud provider's global configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfigurationGlobal { + /// APIBindPort configures the vSphere cloud controller manager API port. Defaults to 43001. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiBindPort")] + pub api_bind_port: Option, + /// APIDisable disables the vSphere cloud controller manager API. Defaults to true. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiDisable")] + pub api_disable: Option, + /// CAFile Specifies the path to a CA certificate in PEM format. If not configured, the system's CA certificates will be used. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "caFile")] + pub ca_file: Option, + /// Datacenters is a CSV string of the datacenters in which VMs are located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenters: Option, + /// Insecure is a flag that disables TLS peer verification. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub insecure: Option, + /// Password is the password used to access a vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub password: Option, + /// Port is the port on which the vSphere endpoint is listening. Defaults to 443. 
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub port: Option,
+    /// RoundTripperCount specifies the SOAP round tripper count (retries = RoundTripper - 1)
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "roundTripperCount")]
+    pub round_tripper_count: Option,
+    /// SecretName is the name of the Kubernetes secret in which the vSphere credentials are located.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")]
+    pub secret_name: Option,
+    /// SecretNamespace is the namespace for SecretName.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretNamespace")]
+    pub secret_namespace: Option,
+    /// SecretsDirectory is a directory in which secrets may be found. This may be used in the event that: 1. It is not desirable to use the K8s API to watch changes to secrets 2. The cloud controller manager is not running in a K8s environment, such as DC/OS. For example, the container storage interface (CSI) is container orchestrator (CO) agnostic, and should support non-K8s COs. Defaults to /etc/cloud/credentials.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretsDirectory")]
+    pub secrets_directory: Option,
+    /// ServiceAccount is the Kubernetes service account used to launch the cloud controller manager. Defaults to cloud-controller-manager.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccount")]
+    pub service_account: Option,
+    /// Thumbprint is the cryptographic thumbprint of the vSphere endpoint's certificate.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub thumbprint: Option,
+    /// Username is the username used to access a vSphere endpoint.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub username: Option,
+}
+
+/// Labels is the vSphere cloud provider's zone and region configuration.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereClusterCloudProviderConfigurationLabels {
+    /// Region is the region in which VMs are created/located.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub region: Option,
+    /// Zone is the zone in which VMs are created/located.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub zone: Option,
+}
+
+/// Network is the vSphere cloud provider's network configuration.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereClusterCloudProviderConfigurationNetwork {
+    /// Name is the name of the network to which VMs are connected.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub name: Option,
+}
+
+/// CPIProviderConfig contains extra information used to configure the vSphere cloud provider.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereClusterCloudProviderConfigurationProviderConfig {
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub cloud: Option,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub storage: Option,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereClusterCloudProviderConfigurationProviderConfigCloud {
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "controllerImage")]
+    pub controller_image: Option,
+    /// ExtraArgs passes through extra arguments to the cloud provider.
The arguments here are passed to the cloud provider daemonset specification + #[serde(default, skip_serializing_if = "Option::is_none", rename = "extraArgs")] + pub extra_args: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfigurationProviderConfigStorage { + #[serde(default, skip_serializing_if = "Option::is_none", rename = "attacherImage")] + pub attacher_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controllerImage")] + pub controller_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbeImage")] + pub liveness_probe_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "metadataSyncerImage")] + pub metadata_syncer_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeDriverImage")] + pub node_driver_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "provisionerImage")] + pub provisioner_image: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "registrarImage")] + pub registrar_image: Option, +} + +/// VCenter is a list of vCenter configurations. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfigurationVirtualCenter { + /// Datacenters is a CSV string of the datacenters in which VMs are located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenters: Option, + /// Password is the password used to access a vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub password: Option, + /// Port is the port on which the vSphere endpoint is listening. Defaults to 443. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub port: Option, + /// RoundTripperCount specifies the SOAP round tripper count (retries = RoundTripper - 1) + #[serde(default, skip_serializing_if = "Option::is_none", rename = "roundTripperCount")] + pub round_tripper_count: Option, + /// Thumbprint is the cryptographic thumbprint of the vSphere endpoint's certificate. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, + /// Username is the username used to access a vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub username: Option, +} + +/// Workspace is the vSphere cloud provider's workspace configuration. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterCloudProviderConfigurationWorkspace { + /// Datacenter is the datacenter in which VMs are created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the datastore in which VMs are created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// Folder is the folder in which VMs are created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// ResourcePool is the resource pool in which VMs are created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, +} + +/// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterControlPlaneEndpoint { + /// The hostname on which the API server is serving. + pub host: String, + /// The port on which the API server is serving. + pub port: i32, +} + +/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityRef { + /// Kind of the identity. Can either be VSphereClusterIdentity or Secret + pub kind: VSphereClusterIdentityRefKind, + /// Name of the identity. + pub name: String, +} + +/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereClusterIdentityRefKind { + VSphereClusterIdentity, + Secret, +} + +/// LoadBalancerRef may be used to enable a control plane load balancer for this cluster. When a LoadBalancerRef is provided, the VSphereCluster.Status.Ready field will not be true until the referenced resource is Status.Ready and has a non-empty Status.Address value. +/// Deprecated: will be removed in v1alpha4. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterLoadBalancerRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// VSphereClusterStatus defines the observed state of VSphereClusterSpec. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatus { + /// Conditions defines current service state of the VSphereCluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureDomains is a list of failure domain objects synced from the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// FailureDomains is a list of failure domain objects synced from the infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatusFailureDomains { + /// Attributes is a free form map of attributes an infrastructure provider might use or require. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub attributes: Option>, + /// ControlPlane determines if this failure domain is suitable for use by control plane machines. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlane")] + pub control_plane: Option, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheredeploymentzones.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheredeploymentzones.rs new file mode 100644 index 000000000..739dca77a --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheredeploymentzones.rs @@ -0,0 +1,70 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheredeploymentzones.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// VSphereDeploymentZoneSpec defines the desired state of VSphereDeploymentZone +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereDeploymentZone", plural = "vspheredeploymentzones")] +#[kube(status = "VSphereDeploymentZoneStatus")] +#[kube(schema = "disabled")] +pub struct VSphereDeploymentZoneSpec { + /// ControlPlane determines if this failure domain is suitable for use by control plane machines. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlane")] + pub control_plane: Option, + /// failureDomain is the name of the VSphereFailureDomain used for this VSphereDeploymentZone + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// PlacementConstraint encapsulates the placement constraints used within this deployment zone. + #[serde(rename = "placementConstraint")] + pub placement_constraint: VSphereDeploymentZonePlacementConstraint, + /// Server is the address of the vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, +} + +/// PlacementConstraint encapsulates the placement constraints used within this deployment zone. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereDeploymentZonePlacementConstraint { + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereDeploymentZoneStatus { + /// Conditions defines current service state of the VSphereMachine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// Ready is true when the VSphereDeploymentZone resource is ready. If set to false, it will be ignored by VSphereClusters + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereDeploymentZoneStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherefailuredomains.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherefailuredomains.rs new file mode 100644 index 000000000..9d946826f --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherefailuredomains.rs @@ -0,0 +1,98 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherefailuredomains.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// VSphereFailureDomainSpec defines the desired state of VSphereFailureDomain +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereFailureDomain", plural = "vspherefailuredomains")] +#[kube(schema = "disabled")] +pub struct VSphereFailureDomainSpec { + /// Region defines the name and type of a region + pub region: VSphereFailureDomainRegion, + /// Topology is the what describes a given failure domain using vSphere constructs + pub topology: VSphereFailureDomainTopology, + /// Zone defines the name and type of a zone + pub zone: VSphereFailureDomainZone, +} + +/// Region defines the name and type of a region +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainRegion { + /// AutoConfigure tags the Type which is specified in the Topology + #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoConfigure")] + pub auto_configure: Option, + /// Name is the name of the tag that represents this failure domain + pub name: String, + /// TagCategory is the category used for the tag + #[serde(rename = "tagCategory")] + pub tag_category: String, + /// Type is the type of failure domain, the current values are "Datacenter", "ComputeCluster" and "HostGroup" + #[serde(rename = "type")] + pub r#type: VSphereFailureDomainRegionType, +} + +/// 
Region defines the name and type of a region +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereFailureDomainRegionType { + Datacenter, + ComputeCluster, + HostGroup, +} + +/// Topology is the what describes a given failure domain using vSphere constructs +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainTopology { + /// ComputeCluster as the failure domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "computeCluster")] + pub compute_cluster: Option, + /// The underlying infrastructure for this failure domain Datacenter as the failure domain + pub datacenter: String, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// Hosts has information required for placement of machines on VSphere hosts. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hosts: Option, + /// Networks is the list of networks within this failure domain + #[serde(default, skip_serializing_if = "Option::is_none")] + pub networks: Option>, +} + +/// Hosts has information required for placement of machines on VSphere hosts. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainTopologyHosts { + /// HostGroupName is the name of the Host group + #[serde(rename = "hostGroupName")] + pub host_group_name: String, + /// VMGroupName is the name of the VM group + #[serde(rename = "vmGroupName")] + pub vm_group_name: String, +} + +/// Zone defines the name and type of a zone +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainZone { + /// AutoConfigure tags the Type which is specified in the Topology + #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoConfigure")] + pub auto_configure: Option, + /// Name is the name of the tag that represents this failure domain + pub name: String, + /// TagCategory is the category used for the tag + #[serde(rename = "tagCategory")] + pub tag_category: String, + /// Type is the type of failure domain, the current values are "Datacenter", "ComputeCluster" and "HostGroup" + #[serde(rename = "type")] + pub r#type: VSphereFailureDomainZoneType, +} + +/// Zone defines the name and type of a zone +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereFailureDomainZoneType { + Datacenter, + ComputeCluster, + HostGroup, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachines.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachines.rs new file mode 100644 index 000000000..fa2d0a47c --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachines.rs @@ -0,0 +1,221 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachines.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereMachineSpec defines the desired state of VSphereMachine +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = 
"VSphereMachine", plural = "vspheremachines")] +#[kube(namespaced)] +#[kube(status = "VSphereMachineStatus")] +#[kube(schema = "disabled")] +pub struct VSphereMachineSpec { + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. + pub network: VSphereMachineNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// VSphereMachineStatus defines the observed state of VSphereMachine +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatus { + /// Addresses contains the VSphere instance associated addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// Conditions defines current service state of the VSphereMachine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// Network returns the network status for each of the machine's configured network interfaces. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option>, + /// Ready is true when the provider resource is ready. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// MachineAddress contains information for the node's address. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusAddresses { + /// The machine address. + pub address: String, + /// Machine address type, one of Hostname, ExternalIP or InternalIP. + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// NetworkStatus provides information about one of a VM's networks. 
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusNetwork { + /// Connected is a flag that indicates whether this network is currently connected to the VM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub connected: Option, + /// IPAddrs is one or more IP addresses reported by vm-tools. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address of the network device. + #[serde(rename = "macAddr")] + pub mac_addr: String, + /// NetworkName is the name of the network. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkName")] + pub network_name: Option, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachinetemplates.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachinetemplates.rs new file mode 100644 index 000000000..537c1b2c5 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspheremachinetemplates.rs @@ -0,0 +1,211 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspheremachinetemplates.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereMachineTemplateSpec defines the desired state of VSphereMachineTemplate +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereMachineTemplate", plural = "vspheremachinetemplates")] +#[kube(namespaced)] +#[kube(schema = "disabled")] +pub struct VSphereMachineTemplateSpec { + /// VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template + pub template: VSphereMachineTemplateTemplate, +} + +/// VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplate { + /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Spec is the specification of the desired behavior of the machine. + pub spec: VSphereMachineTemplateTemplateSpec, +} + +/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. 
This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. + /// If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + /// Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "generateName")] + pub generate_name: Option, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, + /// Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. + /// Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + /// Deprecated: This field has no function and is going to be removed in a next release. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownerReferences")] + pub owner_references: Option>, +} + +/// OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateMetadataOwnerReferences { + /// API version of the referent. + #[serde(rename = "apiVersion")] + pub api_version: String, + /// If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. 
See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockOwnerDeletion")] + pub block_owner_deletion: Option, + /// If true, this reference points to the managing controller. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub controller: Option, + /// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + pub kind: String, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + pub name: String, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + pub uid: String, +} + +/// Spec is the specification of the desired behavior of the machine. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpec { + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on a VM. Defaults to an empty map. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. 
+ pub network: VSphereMachineTemplateTemplateSpecNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCoresPerSocket is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ProviderID is the virtual machine's BIOS UUID formatted as vsphere://12345678-1234-1234-1234-123456789abc + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate. When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServerCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. 
+ pub via: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherevms.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherevms.rs new file mode 100644 index 000000000..56edf019e --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha3/vspherevms.rs @@ -0,0 +1,249 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha3/vspherevms.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereVMSpec defines the desired state of VSphereVM. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha3", kind = "VSphereVM", plural = "vspherevms")] +#[kube(namespaced)] +#[kube(status = "VSphereVMStatus")] +#[kube(schema = "disabled")] +pub struct VSphereVMSpec { + /// BiosUUID is the VM's BIOS UUID that is assigned at runtime after the VM has been created. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "biosUUID")] + pub bios_uuid: Option, + /// BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bootstrapRef")] + pub bootstrap_ref: Option, + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on a VM. Defaults to an empty map. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. + pub network: VSphereVMNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMBootstrapRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServerCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway6 is the IPv6 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. 
It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// VSphereVMStatus defines the observed state of VSphereVM +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatus { + /// Addresses is a list of the VM's IP addresses. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// CloneMode is the type of clone operation used to clone this VM. Since LinkedMode is the default but fails gracefully if the source of the clone has no snapshots, this field may be used to determine the actual type of clone operation used to create this VM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// Conditions defines current service state of the VSphereVM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a more verbose string suitable for logging and human consumption. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. + /// Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a succinct value suitable for vm interpretation. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. + /// Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// Network returns the network status for each of the machine's configured network interfaces. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option>, + /// Ready is true when the provider resource is ready. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, + /// RetryAfter tracks the time we can retry queueing a task + #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryAfter")] + pub retry_after: Option, + /// Snapshot is the name of the snapshot from which the VM was cloned if LinkedMode is enabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "taskRef")] + pub task_ref: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
+ #[serde(rename = "type")] + pub r#type: String, +} + +/// NetworkStatus provides information about one of a VM's networks. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatusNetwork { + /// Connected is a flag that indicates whether this network is currently connected to the VM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub connected: Option, + /// IPAddrs is one or more IP addresses reported by vm-tools. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address of the network device. + #[serde(rename = "macAddr")] + pub mac_addr: String, + /// NetworkName is the name of the network. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkName")] + pub network_name: Option, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/mod.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/mod.rs new file mode 100644 index 000000000..39d6b9adc --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/mod.rs @@ -0,0 +1,8 @@ +pub mod vsphereclusteridentities; +pub mod vsphereclusters; +pub mod vsphereclustertemplates; +pub mod vspheredeploymentzones; +pub mod vspherefailuredomains; +pub mod vspheremachines; +pub mod vspheremachinetemplates; +pub mod vspherevms; diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusteridentities.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusteridentities.rs new file mode 100644 index 000000000..88d65bd31 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusteridentities.rs @@ -0,0 +1,83 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusteridentities.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereClusterIdentity", plural = "vsphereclusteridentities")] +#[kube(status = "VSphereClusterIdentityStatus")] +#[kube(schema = "disabled")] +pub struct VSphereClusterIdentitySpec { + /// AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. If this object is nil, no namespaces will be allowed + #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedNamespaces")] + pub allowed_namespaces: Option, + /// SecretName references a Secret inside the controller namespace with the credentials to use + #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")] + pub secret_name: Option, +} + +/// AllowedNamespaces is used to identify which namespaces are allowed to use this account. Namespaces can be selected with a label selector. If this object is nil, no namespaces will be allowed +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespaces { + /// Selector is a standard Kubernetes LabelSelector. A label query over a set of resources. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub selector: Option, +} + +/// Selector is a standard Kubernetes LabelSelector. A label query over a set of resources. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespacesSelector { + /// matchExpressions is a list of label selector requirements. The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] + pub match_expressions: Option>, + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] + pub match_labels: Option>, +} + +/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityAllowedNamespacesSelectorMatchExpressions { + /// key is the label key that the selector applies to. + pub key: String, + /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + pub operator: String, + /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub values: Option>, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityStatus { + /// Conditions defines current service state of the VSphereCluster. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. 
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusters.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusters.rs new file mode 100644 index 000000000..cd910ed45 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclusters.rs @@ -0,0 +1,100 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclusters.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereClusterSpec defines the desired state of VSphereCluster +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereCluster", plural = "vsphereclusters")] +#[kube(namespaced)] +#[kube(status = "VSphereClusterStatus")] +#[kube(schema = "disabled")] +pub struct VSphereClusterSpec { + /// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneEndpoint")] + pub control_plane_endpoint: Option, + /// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "identityRef")] + pub identity_ref: Option, + /// Server is the address of the vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterControlPlaneEndpoint { + /// The hostname on which the API server is serving. + pub host: String, + /// The port on which the API server is serving. + pub port: i32, +} + +/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterIdentityRef { + /// Kind of the identity. Can either be VSphereClusterIdentity or Secret + pub kind: VSphereClusterIdentityRefKind, + /// Name of the identity. + pub name: String, +} + +/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereClusterIdentityRefKind { + VSphereClusterIdentity, + Secret, +} + +/// VSphereClusterStatus defines the observed state of VSphereClusterSpec +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatus { + /// Conditions defines current service state of the VSphereCluster. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureDomains is a list of failure domain objects synced from the infrastructure provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomains")] + pub failure_domains: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// FailureDomains is a list of failure domain objects synced from the infrastructure provider. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterStatusFailureDomains { + /// Attributes is a free form map of attributes an infrastructure provider might use or require. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub attributes: Option>, + /// ControlPlane determines if this failure domain is suitable for use by control plane machines. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlane")] + pub control_plane: Option, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclustertemplates.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclustertemplates.rs new file mode 100644 index 000000000..07b5f991c --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vsphereclustertemplates.rs @@ -0,0 +1,64 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vsphereclustertemplates.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// VSphereClusterTemplateSpec defines the desired state of VSphereClusterTemplate +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereClusterTemplate", plural = "vsphereclustertemplates")] +#[kube(namespaced)] +#[kube(schema = "disabled")] +pub struct VSphereClusterTemplateSpec { + pub template: VSphereClusterTemplateTemplate, +} + +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterTemplateTemplate { + /// VSphereClusterSpec defines the desired state of VSphereCluster + pub spec: VSphereClusterTemplateTemplateSpec, +} + +/// VSphereClusterSpec defines the desired state of VSphereCluster +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterTemplateTemplateSpec { + /// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlaneEndpoint")] + pub control_plane_endpoint: Option, + /// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "identityRef")] + pub identity_ref: Option, + /// Server is the address of the vSphere endpoint. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterTemplateTemplateSpecControlPlaneEndpoint { + /// The hostname on which the API server is serving. + pub host: String, + /// The port on which the API server is serving. + pub port: i32, +} + +/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereClusterTemplateTemplateSpecIdentityRef { + /// Kind of the identity. Can either be VSphereClusterIdentity or Secret + pub kind: VSphereClusterTemplateTemplateSpecIdentityRefKind, + /// Name of the identity. 
+    pub name: String,
+}
+
+/// IdentityRef is a reference to either a Secret or VSphereClusterIdentity that contains the identity to use when reconciling the cluster.
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+pub enum VSphereClusterTemplateTemplateSpecIdentityRefKind {
+    VSphereClusterIdentity,
+    Secret,
+}
+
diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheredeploymentzones.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheredeploymentzones.rs
new file mode 100644
index 000000000..53dc36354
--- /dev/null
+++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheredeploymentzones.rs
@@ -0,0 +1,70 @@
+// WARNING: generated by kopium - manual changes will be overwritten
+// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheredeploymentzones.yaml --derive=Default --derive=PartialEq
+// kopium version: 0.16.5
+
+use kube::CustomResource;
+use serde::{Serialize, Deserialize};
+
+/// VSphereDeploymentZoneSpec defines the desired state of VSphereDeploymentZone
+#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereDeploymentZone", plural = "vspheredeploymentzones")]
+#[kube(status = "VSphereDeploymentZoneStatus")]
+#[kube(schema = "disabled")]
+pub struct VSphereDeploymentZoneSpec {
+    /// ControlPlane determines if this failure domain is suitable for use by control plane machines.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "controlPlane")]
+    pub control_plane: Option<bool>,
+    /// FailureDomain is the name of the VSphereFailureDomain used for this VSphereDeploymentZone
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")]
+    pub failure_domain: Option<String>,
+    /// PlacementConstraint encapsulates the placement constraints used within this deployment zone.
+    #[serde(rename = "placementConstraint")]
+    pub placement_constraint: VSphereDeploymentZonePlacementConstraint,
+    /// Server is the address of the vSphere endpoint.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub server: Option<String>,
+}
+
+/// PlacementConstraint encapsulates the placement constraints used within this deployment zone.
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereDeploymentZonePlacementConstraint {
+    /// Folder is the name or inventory path of the folder in which the virtual machine is created/located.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub folder: Option<String>,
+    /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located.
+    #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")]
+    pub resource_pool: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
+pub struct VSphereDeploymentZoneStatus {
+    /// Conditions defines current service state of the VSphereDeploymentZone.
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub conditions: Option<Vec<VSphereDeploymentZoneStatusConditions>>,
+    /// Ready is true when the VSphereDeploymentZone resource is ready. If set to false, it will be ignored by VSphereClusters
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub ready: Option<bool>,
+}
+
+/// Condition defines an observation of a Cluster API resource operational state.
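The VSphereDeploymentZone type above derives kube::CustomResource, so kube's derive macro also produces a `VSphereDeploymentZone::new(name, spec)` constructor and the API metadata a typed client needs. A minimal usage sketch follows; the crate/module path is assumed to mirror the generated file layout, tokio is an assumed dependency, and all names and field values are illustrative.

use kube::api::PostParams;
use kube::{Api, Client};
use kube_custom_resources_rs::infrastructure_cluster_x_k8s_io::v1alpha4::vspheredeploymentzones::{
    VSphereDeploymentZone, VSphereDeploymentZonePlacementConstraint, VSphereDeploymentZoneSpec,
};

#[tokio::main]
async fn main() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;
    // No #[kube(namespaced)] attribute above, so the resource is cluster-scoped.
    let zones: Api<VSphereDeploymentZone> = Api::all(client);

    // `new(name, spec)` is generated by the kube CustomResource derive.
    let zone = VSphereDeploymentZone::new(
        "zone-a",
        VSphereDeploymentZoneSpec {
            control_plane: Some(true),
            failure_domain: Some("fd-a".to_string()),
            placement_constraint: VSphereDeploymentZonePlacementConstraint {
                folder: Some("/dc0/vm/capv".to_string()),
                resource_pool: Some("/dc0/host/cluster0/Resources".to_string()),
            },
            server: Some("vcenter.example.com".to_string()),
        },
    );

    zones.create(&PostParams::default(), &zone).await?;
    Ok(())
}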
+#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereDeploymentZoneStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherefailuredomains.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherefailuredomains.rs new file mode 100644 index 000000000..9e10ba4d3 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherefailuredomains.rs @@ -0,0 +1,98 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherefailuredomains.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; + +/// VSphereFailureDomainSpec defines the desired state of VSphereFailureDomain +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereFailureDomain", plural = "vspherefailuredomains")] +#[kube(schema = "disabled")] +pub struct VSphereFailureDomainSpec { + /// Region defines the name and type of a region + pub region: VSphereFailureDomainRegion, + /// Topology describes a given failure domain using vSphere constructs + pub topology: VSphereFailureDomainTopology, + /// Zone defines the name and type of a zone + pub zone: VSphereFailureDomainZone, +} + +/// Region defines the name and type of a region +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainRegion { + /// AutoConfigure tags the Type which is specified in the Topology + #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoConfigure")] + pub auto_configure: Option, + /// Name is the name of the tag that represents this failure domain + pub name: String, + /// TagCategory is the category used for the tag + #[serde(rename = 
"tagCategory")] + pub tag_category: String, + /// Type is the type of failure domain, the current values are "Datacenter", "ComputeCluster" and "HostGroup" + #[serde(rename = "type")] + pub r#type: VSphereFailureDomainRegionType, +} + +/// Region defines the name and type of a region +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereFailureDomainRegionType { + Datacenter, + ComputeCluster, + HostGroup, +} + +/// Topology describes a given failure domain using vSphere constructs +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainTopology { + /// ComputeCluster as the failure domain + #[serde(default, skip_serializing_if = "Option::is_none", rename = "computeCluster")] + pub compute_cluster: Option, + /// The underlying infrastructure for this failure domain Datacenter as the failure domain + pub datacenter: String, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// Hosts has information required for placement of machines on VSphere hosts. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hosts: Option, + /// Networks is the list of networks within this failure domain + #[serde(default, skip_serializing_if = "Option::is_none")] + pub networks: Option>, +} + +/// Hosts has information required for placement of machines on VSphere hosts. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainTopologyHosts { + /// HostGroupName is the name of the Host group + #[serde(rename = "hostGroupName")] + pub host_group_name: String, + /// VMGroupName is the name of the VM group + #[serde(rename = "vmGroupName")] + pub vm_group_name: String, +} + +/// Zone defines the name and type of a zone +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereFailureDomainZone { + /// AutoConfigure tags the Type which is specified in the Topology + #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoConfigure")] + pub auto_configure: Option, + /// Name is the name of the tag that represents this failure domain + pub name: String, + /// TagCategory is the category used for the tag + #[serde(rename = "tagCategory")] + pub tag_category: String, + /// Type is the type of failure domain, the current values are "Datacenter", "ComputeCluster" and "HostGroup" + #[serde(rename = "type")] + pub r#type: VSphereFailureDomainZoneType, +} + +/// Zone defines the name and type of a zone +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum VSphereFailureDomainZoneType { + Datacenter, + ComputeCluster, + HostGroup, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachines.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachines.rs new file mode 100644 index 000000000..6b0922c18 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachines.rs @@ -0,0 +1,221 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachines.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// 
VSphereMachineSpec defines the desired state of VSphereMachine +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereMachine", plural = "vspheremachines")] +#[kube(namespaced)] +#[kube(status = "VSphereMachineStatus")] +#[kube(schema = "disabled")] +pub struct VSphereMachineSpec { + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. + pub network: VSphereMachineNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// VSphereMachineStatus defines the observed state of VSphereMachine +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatus { + /// Addresses contains the VSphere instance associated addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// Conditions defines current service state of the VSphereMachine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. 
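Because the VSphereMachine spec structs above derive `Default`, only the CRD-required fields (`template`, `network`, and each device's `networkName`) have to be spelled out when building an object by hand. A short sketch, assuming the generated module path, the integer/boolean parameter types implied by the CRD schema, and `serde_json` as a dependency; the template name and sizes are illustrative.

use kube_custom_resources_rs::infrastructure_cluster_x_k8s_io::v1alpha4::vspheremachines::{
    VSphereMachine, VSphereMachineNetwork, VSphereMachineNetworkDevices, VSphereMachineSpec,
};

fn main() {
    let spec = VSphereMachineSpec {
        // `template` and `network` are required by the CRD, so they are not Option-wrapped.
        template: "ubuntu-2204-kube-v1.28.0".to_string(),
        network: VSphereMachineNetwork {
            devices: vec![VSphereMachineNetworkDevices {
                network_name: "VM Network".to_string(),
                dhcp4: Some(true),
                ..Default::default()
            }],
            ..Default::default()
        },
        num_cp_us: Some(4),
        memory_mi_b: Some(8192),
        disk_gi_b: Some(40),
        // Everything else falls back to Default because the struct derives Default.
        ..Default::default()
    };

    // `new(name, spec)` is generated by the kube CustomResource derive.
    let machine = VSphereMachine::new("worker-0", spec);
    println!("{}", serde_json::to_string_pretty(&machine).expect("serializable"));
}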
+ /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. + /// Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// Network returns the network status for each of the machine's configured network interfaces. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option>, + /// Ready is true when the provider resource is ready. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, +} + +/// MachineAddress contains information for the node's address. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusAddresses { + /// The machine address. + pub address: String, + /// Machine address type, one of Hostname, ExternalIP or InternalIP. + #[serde(rename = "type")] + pub r#type: String, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + #[serde(rename = "type")] + pub r#type: String, +} + +/// NetworkStatus provides information about one of a VM's networks. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineStatusNetwork { + /// Connected is a flag that indicates whether this network is currently connected to the VM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub connected: Option, + /// IPAddrs is one or more IP addresses reported by vm-tools. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address of the network device. + #[serde(rename = "macAddr")] + pub mac_addr: String, + /// NetworkName is the name of the network. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkName")] + pub network_name: Option, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachinetemplates.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachinetemplates.rs new file mode 100644 index 000000000..093faf2e2 --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspheremachinetemplates.rs @@ -0,0 +1,172 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspheremachinetemplates.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereMachineTemplateSpec defines the desired state of VSphereMachineTemplate +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereMachineTemplate", plural = "vspheremachinetemplates")] +#[kube(namespaced)] +#[kube(schema = "disabled")] +pub struct VSphereMachineTemplateSpec { + /// VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template + pub template: VSphereMachineTemplateTemplate, +} + +/// VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplate { + /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// Spec is the specification of the desired behavior of the machine. 
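The VSphereMachine status types above expose Cluster API-style conditions. A sketch of listing machines and checking for a `Ready` condition, assuming the module path mirrors the generated file layout and that `conditions` carries a `Vec<VSphereMachineStatusConditions>` as the struct definition implies; namespace and runtime setup are illustrative.

use kube::api::ListParams;
use kube::{Api, Client};
use kube_custom_resources_rs::infrastructure_cluster_x_k8s_io::v1alpha4::vspheremachines::VSphereMachine;

#[tokio::main]
async fn main() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;
    // VSphereMachine is #[kube(namespaced)], so the Api is scoped to a namespace.
    let machines: Api<VSphereMachine> = Api::namespaced(client, "default");

    for m in machines.list(&ListParams::default()).await?.items {
        let name = m.metadata.name.as_deref().unwrap_or("<unnamed>");
        // A machine is considered ready here if any condition of type "Ready" is True.
        let ready = m
            .status
            .as_ref()
            .and_then(|s| s.conditions.as_ref())
            .map(|conds| conds.iter().any(|c| c.r#type == "Ready" && c.status == "True"))
            .unwrap_or(false);
        println!("{name}: ready={ready}");
    }
    Ok(())
}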
+ pub spec: VSphereMachineTemplateTemplateSpec, +} + +/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateMetadata { + /// Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations + #[serde(default, skip_serializing_if = "Option::is_none")] + pub annotations: Option>, + /// Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels + #[serde(default, skip_serializing_if = "Option::is_none")] + pub labels: Option>, +} + +/// Spec is the specification of the desired behavior of the machine. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpec { + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. For this infrastructure provider, the name is equivalent to the name of the VSphereDeploymentZone. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureDomain")] + pub failure_domain: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. 
+ pub network: VSphereMachineTemplateTemplateSpecNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ProviderID is the virtual machine's BIOS UUID formated as vsphere://12345678-1234-1234-1234-123456789abc + #[serde(default, skip_serializing_if = "Option::is_none", rename = "providerID")] + pub provider_id: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereMachineTemplateTemplateSpecNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. 
+ pub via: String, +} + diff --git a/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherevms.rs b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherevms.rs new file mode 100644 index 000000000..7e8b7a7eb --- /dev/null +++ b/kube-custom-resources-rs/src/infrastructure_cluster_x_k8s_io/v1alpha4/vspherevms.rs @@ -0,0 +1,249 @@ +// WARNING: generated by kopium - manual changes will be overwritten +// kopium command: kopium --docs --filename=./crd-catalog/kubernetes-sigs/cluster-api-provider-vsphere/infrastructure.cluster.x-k8s.io/v1alpha4/vspherevms.yaml --derive=Default --derive=PartialEq +// kopium version: 0.16.5 + +use kube::CustomResource; +use serde::{Serialize, Deserialize}; +use std::collections::BTreeMap; + +/// VSphereVMSpec defines the desired state of VSphereVM. +#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +#[kube(group = "infrastructure.cluster.x-k8s.io", version = "v1alpha4", kind = "VSphereVM", plural = "vspherevms")] +#[kube(namespaced)] +#[kube(status = "VSphereVMStatus")] +#[kube(schema = "disabled")] +pub struct VSphereVMSpec { + /// BiosUUID is the VM's BIOS UUID that is assigned at runtime after the VM has been created. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "biosUUID")] + pub bios_uuid: Option, + /// BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "bootstrapRef")] + pub bootstrap_ref: Option, + /// CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map + #[serde(default, skip_serializing_if = "Option::is_none", rename = "customVMXKeys")] + pub custom_vmx_keys: Option>, + /// Datacenter is the name or inventory path of the datacenter in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datacenter: Option, + /// Datastore is the name or inventory path of the datastore in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub datastore: Option, + /// DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "diskGiB")] + pub disk_gi_b: Option, + /// Folder is the name or inventory path of the folder in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub folder: Option, + /// MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the eponymous property value in the template from which the virtual machine is cloned. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "memoryMiB")] + pub memory_mi_b: Option, + /// Network is the network configuration for this machine's VM. + pub network: VSphereVMNetwork, + /// NumCPUs is the number of virtual processors in a virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCPUs")] + pub num_cp_us: Option, + /// NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the eponymous property value in the template from which the virtual machine is cloned. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "numCoresPerSocket")] + pub num_cores_per_socket: Option, + /// ResourcePool is the name or inventory path of the resource pool in which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")] + pub resource_pool: Option, + /// Server is the IP address or FQDN of the vSphere server on which the virtual machine is created/located. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + /// Snapshot is the name of the snapshot from which to create a linked clone. This field is ignored if LinkedClone is not enabled. Defaults to the source's current snapshot. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// StoragePolicyName of the storage policy to use with this Virtual Machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "storagePolicyName")] + pub storage_policy_name: Option, + /// Template is the name or inventory path of the template used to clone the virtual machine. + pub template: String, + /// Thumbprint is the colon-separated SHA-1 checksum of the given vCenter server's host certificate When this is set to empty, this VirtualMachine would be created without TLS certificate validation of the communication between Cluster API Provider vSphere and the VMware vCenter server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thumbprint: Option, +} + +/// BootstrapRef is a reference to a bootstrap provider-specific resource that holds configuration details. This field is optional in case no bootstrap data is required to create a VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMBootstrapRef { + /// API version of the referent. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiVersion")] + pub api_version: Option, + /// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldPath")] + pub field_path: Option, + /// Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + #[serde(default, skip_serializing_if = "Option::is_none")] + pub kind: Option, + /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + #[serde(default, skip_serializing_if = "Option::is_none")] + pub namespace: Option, + /// Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceVersion")] + pub resource_version: Option, + /// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + #[serde(default, skip_serializing_if = "Option::is_none")] + pub uid: Option, +} + +/// Network is the network configuration for this machine's VM. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetwork { + /// Devices is the list of network devices used by the virtual machine. TODO(akutz) Make sure at least one network matches the ClusterSpec.CloudProviderConfiguration.Network.Name + pub devices: Vec, + /// PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API server endpoint on this machine + #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredAPIServerCidr")] + pub preferred_api_server_cidr: Option, + /// Routes is a list of optional, static routes applied to the virtual machine. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, +} + +/// NetworkDeviceSpec defines the network configuration for a virtual machine's network device. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkDevices { + /// DeviceName may be used to explicitly assign a name to the network device as it exists in the guest operating system. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "deviceName")] + pub device_name: Option, + /// DHCP4 is a flag that indicates whether or not to use DHCP for IPv4 on this device. If true then IPAddrs should not contain any IPv4 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp4: Option, + /// DHCP6 is a flag that indicates whether or not to use DHCP for IPv6 on this device. If true then IPAddrs should not contain any IPv6 addresses. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dhcp6: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP4 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway4: Option, + /// Gateway4 is the IPv4 gateway used by this device. Required when DHCP6 is false. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub gateway6: Option, + /// IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign to this device. IP addresses must also specify the segment length in CIDR notation. Required when DHCP4 and DHCP6 are both false. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address used by this device. 
It is generally a good idea to omit this field and allow a MAC address to be generated. Please note that this value must use the VMware OUI to work with the in-tree vSphere cloud provider. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAddr")] + pub mac_addr: Option, + /// MTU is the device’s Maximum Transmission Unit size in bytes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mtu: Option, + /// Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers. Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub nameservers: Option>, + /// NetworkName is the name of the vSphere network to which the device will be connected. + #[serde(rename = "networkName")] + pub network_name: String, + /// Routes is a list of optional, static routes applied to the device. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub routes: Option>, + /// SearchDomains is a list of search domains used when resolving IP addresses with DNS. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "searchDomains")] + pub search_domains: Option>, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkDevicesRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// NetworkRouteSpec defines a static network route. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMNetworkRoutes { + /// Metric is the weight/priority of the route. + pub metric: i32, + /// To is an IPv4 or IPv6 address. + pub to: String, + /// Via is an IPv4 or IPv6 address. + pub via: String, +} + +/// VSphereVMStatus defines the observed state of VSphereVM +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatus { + /// Addresses is a list of the VM's IP addresses. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub addresses: Option>, + /// CloneMode is the type of clone operation used to clone this VM. Since LinkedMode is the default but fails gracefully if the source of the clone has no snapshots, this field may be used to determine the actual type of clone operation used to create this VM. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneMode")] + pub clone_mode: Option, + /// Conditions defines current service state of the VSphereVM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub conditions: Option>, + /// FailureMessage will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a more verbose string suitable for logging and human consumption. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. + /// Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output. 
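Once a VSphereVM has been reconciled, the runtime-populated status fields above (addresses, cloneMode, conditions) can be read back through a typed client. A sketch under the assumption that the module path mirrors the generated layout and that `addresses` is the `Option<Vec<String>>` the CRD schema implies; the VM name and namespace are illustrative.

use kube::{Api, Client};
use kube_custom_resources_rs::infrastructure_cluster_x_k8s_io::v1alpha4::vspherevms::VSphereVM;

#[tokio::main]
async fn main() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;
    let vms: Api<VSphereVM> = Api::namespaced(client, "default");

    let vm = vms.get("my-vm").await?;
    if let Some(status) = vm.status {
        // These fields are filled in by the controller at runtime.
        println!("addresses: {:?}", status.addresses.unwrap_or_default());
        println!("cloneMode: {:?}", status.clone_mode);
    }
    Ok(())
}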
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureMessage")] + pub failure_message: Option, + /// FailureReason will be set in the event that there is a terminal problem reconciling the vspherevm and will contain a succinct value suitable for vm interpretation. + /// This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the vm. + /// Any transient errors that occur during the reconciliation of vspherevms can be added as events to the vspherevm object and/or logged in the controller's output. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] + pub failure_reason: Option, + /// Network returns the network status for each of the machine's configured network interfaces. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub network: Option>, + /// Ready is true when the provider resource is ready. This field is required at runtime for other controllers that read this CRD as unstructured data. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ready: Option, + /// RetryAfter tracks the time we can retry queueing a task + #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryAfter")] + pub retry_after: Option, + /// Snapshot is the name of the snapshot from which the VM was cloned if LinkedMode is enabled. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub snapshot: Option, + /// TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "taskRef")] + pub task_ref: Option, +} + +/// Condition defines an observation of a Cluster API resource operational state. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatusConditions { + /// Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastTransitionTime")] + pub last_transition_time: Option, + /// A human readable message indicating details about the transition. This field may be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub message: Option, + /// The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reason: Option, + /// Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub severity: Option, + /// Status of the condition, one of True, False, Unknown. + pub status: String, + /// Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
+ #[serde(rename = "type")] + pub r#type: String, +} + +/// NetworkStatus provides information about one of a VM's networks. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct VSphereVMStatusNetwork { + /// Connected is a flag that indicates whether this network is currently connected to the VM. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub connected: Option, + /// IPAddrs is one or more IP addresses reported by vm-tools. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipAddrs")] + pub ip_addrs: Option>, + /// MACAddr is the MAC address of the network device. + #[serde(rename = "macAddr")] + pub mac_addr: String, + /// NetworkName is the name of the network. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkName")] + pub network_name: Option, +} + diff --git a/kube-custom-resources-rs/src/jobset_x_k8s_io/v1alpha2/jobsets.rs b/kube-custom-resources-rs/src/jobset_x_k8s_io/v1alpha2/jobsets.rs index 45c9eb237..9b181a7f2 100644 --- a/kube-custom-resources-rs/src/jobset_x_k8s_io/v1alpha2/jobsets.rs +++ b/kube-custom-resources-rs/src/jobset_x_k8s_io/v1alpha2/jobsets.rs @@ -14,7 +14,10 @@ use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; #[kube(status = "JobSetStatus")] #[kube(schema = "disabled")] pub struct JobSetSpec { - /// FailurePolicy, if set, configures when to declare the JobSet as failed. The JobSet is always declared failed if any job in the set finished with status failed. + /// FailurePolicy, if set, configures when to declare the JobSet as + /// failed. + /// The JobSet is always declared failed if any job in the set + /// finished with status failed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failurePolicy")] pub failure_policy: Option, /// Network defines the networking options for the jobset. @@ -23,7 +26,13 @@ pub struct JobSetSpec { /// ReplicatedJobs is the group of jobs that will form the set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicatedJobs")] pub replicated_jobs: Option>, - /// SuccessPolicy configures when to declare the JobSet as succeeded. The JobSet is always declared succeeded if all jobs in the set finished with status complete. + /// StartupPolicy, if set, configures in what order jobs must be started + #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupPolicy")] + pub startup_policy: Option, + /// SuccessPolicy configures when to declare the JobSet as + /// succeeded. + /// The JobSet is always declared succeeded if all jobs in the set + /// finished with status complete. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successPolicy")] pub success_policy: Option, /// Suspend suspends all running child Jobs when set to true. @@ -31,10 +40,14 @@ pub struct JobSetSpec { pub suspend: Option, } -/// FailurePolicy, if set, configures when to declare the JobSet as failed. The JobSet is always declared failed if any job in the set finished with status failed. +/// FailurePolicy, if set, configures when to declare the JobSet as +/// failed. +/// The JobSet is always declared failed if any job in the set +/// finished with status failed. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetFailurePolicy { - /// MaxRestarts defines the limit on the number of JobSet restarts. A restart is achieved by recreating all active child jobs. + /// MaxRestarts defines the limit on the number of JobSet restarts. 
+ /// A restart is achieved by recreating all active child jobs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRestarts")] pub max_restarts: Option, } @@ -42,19 +55,25 @@ pub struct JobSetFailurePolicy { /// Network defines the networking options for the jobset. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetNetwork { - /// EnableDNSHostnames allows pods to be reached via their hostnames. Pods will be reachable using the fully qualified pod hostname: ---. + /// EnableDNSHostnames allows pods to be reached via their hostnames. + /// Pods will be reachable using the fully qualified pod hostname: + /// ---. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableDNSHostnames")] pub enable_dns_hostnames: Option, - /// Subdomain is an explicit choice for a network subdomain name When set, any replicated job in the set is added to this network. Defaults to if not set. + /// Subdomain is an explicit choice for a network subdomain name + /// When set, any replicated job in the set is added to this network. + /// Defaults to if not set. #[serde(default, skip_serializing_if = "Option::is_none")] pub subdomain: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobs { - /// Name is the name of the entry and will be used as a suffix for the Job name. + /// Name is the name of the entry and will be used as a suffix + /// for the Job name. pub name: String, - /// Replicas is the number of jobs that will be created from this ReplicatedJob's template. Jobs names will be in the format: -- + /// Replicas is the number of jobs that will be created from this ReplicatedJob's template. + /// Jobs names will be in the format: -- #[serde(default, skip_serializing_if = "Option::is_none")] pub replicas: Option, /// Template defines the template of the Job that will be created. @@ -64,15 +83,18 @@ pub struct JobSetReplicatedJobs { /// Template defines the template of the Job that will be created. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplate { - /// Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + /// Standard object's metadata of the jobs created from this template. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[serde(default, skip_serializing_if = "Option::is_none")] pub metadata: Option, - /// Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + /// Specification of the desired behavior of the job. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[serde(default, skip_serializing_if = "Option::is_none")] pub spec: Option, } -/// Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +/// Standard object's metadata of the jobs created from this template. 
+/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -87,75 +109,201 @@ pub struct JobSetReplicatedJobsTemplateMetadata { pub namespace: Option, } -/// Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +/// Specification of the desired behavior of the job. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpec { - /// Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again. + /// Specifies the duration in seconds relative to the startTime that the job + /// may be continuously active before the system tries to terminate it; value + /// must be positive integer. If a Job is suspended (at creation or through an + /// update), this timer will effectively be stopped and reset when the Job is + /// resumed again. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activeDeadlineSeconds")] pub active_deadline_seconds: Option, - /// Specifies the number of retries before marking this job failed. Defaults to 6 + /// Specifies the number of retries before marking this job failed. + /// Defaults to 6 #[serde(default, skip_serializing_if = "Option::is_none", rename = "backoffLimit")] pub backoff_limit: Option, - /// Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + /// Specifies the limit for the number of retries within an + /// index before marking this index as failed. When enabled the number of + /// failures per index is kept in the pod's + /// batch.kubernetes.io/job-index-failure-count annotation. It can only + /// be set when Job's completionMode=Indexed, and the Pod's restart + /// policy is Never. The field is immutable. + /// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + /// feature gate is enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "backoffLimitPerIndex")] pub backoff_limit_per_index: Option, - /// completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. - /// `NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. - /// `Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. 
The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`. - /// More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job. + /// completionMode specifies how Pod completions are tracked. It can be + /// `NonIndexed` (default) or `Indexed`. + /// + /// + /// `NonIndexed` means that the Job is considered complete when there have + /// been .spec.completions successfully completed Pods. Each Pod completion is + /// homologous to each other. + /// + /// + /// `Indexed` means that the Pods of a + /// Job get an associated completion index from 0 to (.spec.completions - 1), + /// available in the annotation batch.kubernetes.io/job-completion-index. + /// The Job is considered complete when there is one successfully completed Pod + /// for each index. + /// When value is `Indexed`, .spec.completions must be specified and + /// `.spec.parallelism` must be less than or equal to 10^5. + /// In addition, The Pod name takes the form + /// `$(job-name)-$(index)-$(random-string)`, + /// the Pod hostname takes the form `$(job-name)-$(index)`. + /// + /// + /// More completion modes can be added in the future. + /// If the Job controller observes a mode that it doesn't recognize, which + /// is possible during upgrades due to version skew, the controller + /// skips updates for the Job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "completionMode")] pub completion_mode: Option, - /// Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + /// Specifies the desired number of successfully finished pods the + /// job should be run with. Setting to null means that the success of any + /// pod signals the success of all pods, and allows parallelism to have any positive + /// value. Setting to 1 means that parallelism is limited to 1 and the success of that + /// pod signals the success of the job. + /// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ #[serde(default, skip_serializing_if = "Option::is_none")] pub completions: Option, - /// manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector + /// manualSelector controls generation of pod labels and pod selectors. + /// Leave `manualSelector` unset unless you are certain what you are doing. + /// When false or unset, the system pick labels unique to this job + /// and appends those labels to the pod template. When true, + /// the user is responsible for picking unique labels and specifying + /// the selector. Failure to pick a unique label may cause this + /// and other jobs to not function correctly. However, You may see + /// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + /// API. + /// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector #[serde(default, skip_serializing_if = "Option::is_none", rename = "manualSelector")] pub manual_selector: Option, - /// Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + /// Specifies the maximal number of failed indexes before marking the Job as + /// failed, when backoffLimitPerIndex is set. Once the number of failed + /// indexes exceeds this number the entire Job is marked as Failed and its + /// execution is terminated. When left as null the job continues execution of + /// all of its indexes and is marked with the `Complete` Job condition. + /// It can only be specified when backoffLimitPerIndex is set. + /// It can be null or up to completions. It is required and must be + /// less than or equal to 10^4 when is completions greater than 10^5. + /// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + /// feature gate is enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxFailedIndexes")] pub max_failed_indexes: Option, - /// Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + /// Specifies the maximum desired number of pods the job should + /// run at any given time. The actual number of pods running in steady state will + /// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + /// i.e. when the work left to do is less than max parallelism. + /// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ #[serde(default, skip_serializing_if = "Option::is_none")] pub parallelism: Option, - /// Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. 
If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure. - /// This field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default). + /// Specifies the policy of handling failed pods. In particular, it allows to + /// specify the set of actions and conditions which need to be + /// satisfied to take the associated action. + /// If empty, the default behaviour applies - the counter of failed pods, + /// represented by the jobs's .status.failed field, is incremented and it is + /// checked against the backoffLimit. This field cannot be used in combination + /// with restartPolicy=OnFailure. + /// + /// + /// This field is beta-level. It can be used when the `JobPodFailurePolicy` + /// feature gate is enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "podFailurePolicy")] pub pod_failure_policy: Option, - /// podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods when they are terminating (has a metadata.deletionTimestamp) or failed. - Failed means to wait until a previously created Pod is fully terminated (has phase Failed or Succeeded) before creating a replacement Pod. - /// When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default. + /// podReplacementPolicy specifies when to create replacement Pods. + /// Possible values are: + /// - TerminatingOrFailed means that we recreate pods + /// when they are terminating (has a metadata.deletionTimestamp) or failed. + /// - Failed means to wait until a previously created Pod is fully terminated (has phase + /// Failed or Succeeded) before creating a replacement Pod. + /// + /// + /// When using podFailurePolicy, Failed is the the only allowed value. + /// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + /// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + /// This is on by default. #[serde(default, skip_serializing_if = "Option::is_none", rename = "podReplacementPolicy")] pub pod_replacement_policy: Option, - /// A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + /// A label query over pods that should match the pod count. + /// Normally, the system sets this field for you. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors #[serde(default, skip_serializing_if = "Option::is_none")] pub selector: Option, - /// suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. 
Defaults to false. + /// suspend specifies whether the Job controller should create Pods or not. If + /// a Job is created with suspend set to true, no Pods are created by the Job + /// controller. If a Job is suspended after creation (i.e. the flag goes from + /// false to true), the Job controller will delete all active Pods associated + /// with this Job. Users must design their workload to gracefully handle this. + /// Suspending a Job will reset the StartTime field of the Job, effectively + /// resetting the ActiveDeadlineSeconds timer too. Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none")] pub suspend: Option, - /// Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + /// Describes the pod that will be created when executing a job. + /// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". + /// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ pub template: JobSetReplicatedJobsTemplateSpecTemplate, - /// ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. + /// ttlSecondsAfterFinished limits the lifetime of a Job that has finished + /// execution (either Complete or Failed). If this field is set, + /// ttlSecondsAfterFinished after the Job finishes, it is eligible to be + /// automatically deleted. When the Job is being deleted, its lifecycle + /// guarantees (e.g. finalizers) will be honored. If this field is unset, + /// the Job won't be automatically deleted. If this field is set to zero, + /// the Job becomes eligible to be deleted immediately after it finishes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ttlSecondsAfterFinished")] pub ttl_seconds_after_finished: Option, } -/// Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure. -/// This field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default). +/// Specifies the policy of handling failed pods. In particular, it allows to +/// specify the set of actions and conditions which need to be +/// satisfied to take the associated action. +/// If empty, the default behaviour applies - the counter of failed pods, +/// represented by the jobs's .status.failed field, is incremented and it is +/// checked against the backoffLimit. This field cannot be used in combination +/// with restartPolicy=OnFailure. +/// +/// +/// This field is beta-level. It can be used when the `JobPodFailurePolicy` +/// feature gate is enabled (enabled by default). 
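// --- Illustrative sketch (editor's note, not part of the generated patch) ---
// The reflowed doc comment above describes the Job podFailurePolicy carried by the
// JobSet template. A minimal, hypothetical construction of these generated types is
// sketched below; it assumes the module path
// kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets (matching the file
// touched by this hunk) and that the elided `values` field is generated as Vec<i32>,
// as in the upstream Job API.
use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::{
    JobSetReplicatedJobsTemplateSpecPodFailurePolicy,
    JobSetReplicatedJobsTemplateSpecPodFailurePolicyRules,
    JobSetReplicatedJobsTemplateSpecPodFailurePolicyRulesOnExitCodes,
};

fn example_pod_failure_policy() -> JobSetReplicatedJobsTemplateSpecPodFailurePolicy {
    JobSetReplicatedJobsTemplateSpecPodFailurePolicy {
        // Fail the whole Job as soon as any container exits with code 42.
        rules: vec![JobSetReplicatedJobsTemplateSpecPodFailurePolicyRules {
            action: "FailJob".to_string(),
            on_exit_codes: Some(JobSetReplicatedJobsTemplateSpecPodFailurePolicyRulesOnExitCodes {
                container_name: None, // apply to every container in the pod
                operator: "In".to_string(),
                values: vec![42],
            }),
            on_pod_conditions: None,
        }],
    }
}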
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecPodFailurePolicy { - /// A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. + /// A list of pod failure policy rules. The rules are evaluated in order. + /// Once a rule matches a Pod failure, the remaining of the rules are ignored. + /// When no rule matches the Pod failure, the default handling applies - the + /// counter of pod failures is incremented and it is checked against + /// the backoffLimit. At most 20 elements are allowed. pub rules: Vec, } -/// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule. +/// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +/// One of onExitCodes and onPodConditions, but not both, can be used in each rule. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecPodFailurePolicyRules { - /// Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - /// - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. This value is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. + /// Specifies the action taken on a pod failure when the requirements are satisfied. + /// Possible values are: + /// + /// + /// - FailJob: indicates that the pod's job is marked as Failed and all + /// running pods are terminated. + /// - FailIndex: indicates that the pod's index is marked as Failed and will + /// not be restarted. + /// This value is beta-level. It can be used when the + /// `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + /// - Ignore: indicates that the counter towards the .backoffLimit is not + /// incremented and a replacement pod is created. + /// - Count: indicates that the pod is handled in the default way - the + /// counter towards the .backoffLimit is incremented. + /// Additional values are considered to be added in the future. Clients should + /// react to an unknown action by skipping the rule. pub action: String, /// Represents the requirement on the container exit codes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "onExitCodes")] pub on_exit_codes: Option, - /// Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed. + /// Represents the requirement on the pod conditions. The requirement is represented + /// as a list of pod condition patterns. 
The requirement is satisfied if at + /// least one pattern matches an actual pod condition. At most 20 elements are allowed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "onPodConditions")] pub on_pod_conditions: Option>, } @@ -163,61 +311,97 @@ pub struct JobSetReplicatedJobsTemplateSpecPodFailurePolicyRules { /// Represents the requirement on the container exit codes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecPodFailurePolicyRulesOnExitCodes { - /// Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template. + /// Restricts the check for exit codes to the container with the + /// specified name. When null, the rule applies to all containers. + /// When specified, it should match one the container or initContainer + /// names in the pod template. #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerName")] pub container_name: Option, - /// Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are: - /// - In: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is in the set of specified values. - NotIn: the requirement is satisfied if at least one container exit code (might be multiple if there are multiple containers not restricted by the 'containerName' field) is not in the set of specified values. Additional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied. + /// Represents the relationship between the container exit code(s) and the + /// specified values. Containers completed with success (exit code 0) are + /// excluded from the requirement check. Possible values are: + /// + /// + /// - In: the requirement is satisfied if at least one container exit code + /// (might be multiple if there are multiple containers not restricted + /// by the 'containerName' field) is in the set of specified values. + /// - NotIn: the requirement is satisfied if at least one container exit code + /// (might be multiple if there are multiple containers not restricted + /// by the 'containerName' field) is not in the set of specified values. + /// Additional values are considered to be added in the future. Clients should + /// react to an unknown operator by assuming the requirement is not satisfied. pub operator: String, - /// Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed. + /// Specifies the set of values. Each returned container exit code (might be + /// multiple in case of multiple containers) is checked against this set of + /// values with respect to the operator. The list of values must be ordered + /// and must not contain duplicates. Value '0' cannot be used for the In operator. + /// At least one element is required. At most 255 elements are allowed. 
pub values: Vec, } -/// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type. +/// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +/// an actual pod condition type. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecPodFailurePolicyRulesOnPodConditions { - /// Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True. + /// Specifies the required Pod condition status. To match a pod condition + /// it is required that the specified status equals the pod condition status. + /// Defaults to True. pub status: String, - /// Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type. + /// Specifies the required Pod condition type. To match a pod condition + /// it is required that specified type equals the pod condition type. #[serde(rename = "type")] pub r#type: String, } -/// A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +/// A label query over pods that should match the pod count. +/// Normally, the system sets this field for you. +/// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. 
If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ +/// Describes the pod that will be created when executing a job. +/// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". +/// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplate { - /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + /// Standard object's metadata. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[serde(default, skip_serializing_if = "Option::is_none")] pub metadata: Option, - /// Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + /// Specification of the desired behavior of the pod. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[serde(default, skip_serializing_if = "Option::is_none")] pub spec: Option, } -/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +/// Standard object's metadata. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -232,10 +416,13 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateMetadata { pub namespace: Option, } -/// Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +/// Specification of the desired behavior of the pod. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpec { - /// Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + /// Optional duration in seconds the pod may be active on the node relative to + /// StartTime before the system will actively try to mark it failed and kill associated containers. + /// Value must be a positive integer. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activeDeadlineSeconds")] pub active_deadline_seconds: Option, /// If specified, the pod's scheduling constraints @@ -244,117 +431,255 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpec { /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "automountServiceAccountToken")] pub automount_service_account_token: Option, - /// List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + /// List of containers belonging to the pod. + /// Containers cannot currently be added or removed. + /// There must be at least one container in a Pod. + /// Cannot be updated. pub containers: Vec, - /// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. + /// Specifies the DNS parameters of a pod. + /// Parameters specified here will be merged to the generated DNS + /// configuration based on DNSPolicy. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsConfig")] pub dns_config: Option, - /// Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + /// Set DNS policy for the pod. + /// Defaults to "ClusterFirst". + /// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + /// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + /// To have DNS options set along with hostNetwork, you have to specify DNS policy + /// explicitly to 'ClusterFirstWithHostNet'. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsPolicy")] pub dns_policy: Option, - /// EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. + /// EnableServiceLinks indicates whether information about services should be injected into pod's + /// environment variables, matching the syntax of Docker links. + /// Optional: Defaults to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableServiceLinks")] pub enable_service_links: Option, - /// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + /// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + /// pod to perform user-initiated actions such as debugging. This list cannot be specified when + /// creating a pod, and it cannot be modified by updating the pod spec. In order to add an + /// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ephemeralContainers")] pub ephemeral_containers: Option>, - /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + /// file if specified. This is only valid for non-hostNetwork pods. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "hostAliases")] pub host_aliases: Option>, - /// Use the host's ipc namespace. Optional: Default to false. + /// Use the host's ipc namespace. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIPC")] pub host_ipc: Option, - /// Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + /// Host networking requested for this pod. Use the host's network namespace. + /// If this option is set, the ports that will be used must be specified. + /// Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostNetwork")] pub host_network: Option, - /// Use the host's pid namespace. Optional: Default to false. + /// Use the host's pid namespace. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPID")] pub host_pid: Option, - /// Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + /// Use the host's user namespace. + /// Optional: Default to true. + /// If set to true or not present, the pod will be run in the host user namespace, useful + /// for when the pod needs a feature only available to the host user namespace, such as + /// loading a kernel module with CAP_SYS_MODULE. + /// When set to false, a new userns is created for the pod. Setting false is useful for + /// mitigating container breakout vulnerabilities even allowing users to run their + /// containers as root without actually having root privileges on the host. + /// This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostUsers")] pub host_users: Option, - /// Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. + /// Specifies the hostname of the Pod + /// If not specified, the pod's hostname will be set to a system-defined value. #[serde(default, skip_serializing_if = "Option::is_none")] pub hostname: Option, - /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + /// If specified, these secrets will be passed to individual puller implementations for them to use. 
+ /// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullSecrets")] pub image_pull_secrets: Option>, - /// List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + /// List of initialization containers belonging to the pod. + /// Init containers are executed in order prior to containers being started. If any + /// init container fails, the pod is considered to have failed and is handled according + /// to its restartPolicy. The name for an init container or normal container must be + /// unique among all containers. + /// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + /// The resourceRequirements of an init container are taken into account during scheduling + /// by finding the highest request/limit for each resource type, and then using the max of + /// of that value or the sum of the normal containers. Limits are applied to init containers + /// in a similar fashion. + /// Init containers cannot currently be added or removed. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "initContainers")] pub init_containers: Option>, - /// NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + /// NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + /// the scheduler simply schedules this pod onto that node, assuming that it fits resource + /// requirements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] pub node_name: Option, - /// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + /// NodeSelector is a selector which must be true for the pod to fit on a node. + /// Selector which must match a node's labels for the pod to be scheduled on that node. + /// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] pub node_selector: Option>, - /// Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
- /// If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions - /// If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup + /// Specifies the OS of the containers in the pod. + /// Some pod and container fields are restricted if this is set. + /// + /// + /// If the OS field is set to linux, the following fields must be unset: + /// -securityContext.windowsOptions + /// + /// + /// If the OS field is set to windows, following fields must be unset: + /// - spec.hostPID + /// - spec.hostIPC + /// - spec.hostUsers + /// - spec.securityContext.seLinuxOptions + /// - spec.securityContext.seccompProfile + /// - spec.securityContext.fsGroup + /// - spec.securityContext.fsGroupChangePolicy + /// - spec.securityContext.sysctls + /// - spec.shareProcessNamespace + /// - spec.securityContext.runAsUser + /// - spec.securityContext.runAsGroup + /// - spec.securityContext.supplementalGroups + /// - spec.containers[*].securityContext.seLinuxOptions + /// - spec.containers[*].securityContext.seccompProfile + /// - spec.containers[*].securityContext.capabilities + /// - spec.containers[*].securityContext.readOnlyRootFilesystem + /// - spec.containers[*].securityContext.privileged + /// - spec.containers[*].securityContext.allowPrivilegeEscalation + /// - spec.containers[*].securityContext.procMount + /// - spec.containers[*].securityContext.runAsUser + /// - spec.containers[*].securityContext.runAsGroup #[serde(default, skip_serializing_if = "Option::is_none")] pub os: Option, - /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + /// This field will be autopopulated at admission time by the RuntimeClass admission controller. If + /// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + /// The RuntimeClass admission controller will reject Pod create requests which have the overhead already + /// set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + /// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + /// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub overhead: Option>, - /// PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + /// PreemptionPolicy is the Policy for preempting pods with lower priority. + /// One of Never, PreemptLowerPriority. + /// Defaults to PreemptLowerPriority if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preemptionPolicy")] pub preemption_policy: Option, - /// The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + /// The priority value. Various system components use this field to find the + /// priority of the pod. When Priority Admission Controller is enabled, it + /// prevents users from setting this field. The admission controller populates + /// this field from PriorityClassName. + /// The higher the value, the higher the priority. #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option, - /// If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + /// If specified, indicates the pod's priority. "system-node-critical" and + /// "system-cluster-critical" are two special keywords which indicate the + /// highest priorities with the former being the highest priority. Any other + /// name must be defined by creating a PriorityClass object with that name. + /// If not specified, the pod priority will be default or zero if there is no + /// default. #[serde(default, skip_serializing_if = "Option::is_none", rename = "priorityClassName")] pub priority_class_name: Option, - /// If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + /// If specified, all readiness gates will be evaluated for pod readiness. + /// A pod is ready when all its containers are ready AND + /// all conditions specified in the readiness gates have status equal to "True" + /// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessGates")] pub readiness_gates: Option>, - /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. 
+ /// ResourceClaims defines which ResourceClaims must be allocated + /// and reserved before the Pod is allowed to start. The resources + /// will be made available to those containers which consume them + /// by name. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaims")] pub resource_claims: Option>, - /// Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + /// Restart policy for all containers within the pod. + /// One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + /// Default to Always. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + /// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + /// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + /// empty definition that uses the default runtime handler. + /// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(default, skip_serializing_if = "Option::is_none", rename = "runtimeClassName")] pub runtime_class_name: Option, - /// If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + /// If specified, the pod will be dispatched by specified scheduler. + /// If not specified, the pod will be dispatched by default scheduler. #[serde(default, skip_serializing_if = "Option::is_none", rename = "schedulerName")] pub scheduler_name: Option, - /// SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. - /// SchedulingGates can only be set at pod creation time, and be removed only afterwards. - /// This is a beta feature enabled by the PodSchedulingReadiness feature gate. + /// SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + /// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + /// scheduler will not attempt to schedule the pod. + /// + /// + /// SchedulingGates can only be set at pod creation time, and be removed only afterwards. + /// + /// + /// This is a beta feature enabled by the PodSchedulingReadiness feature gate. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "schedulingGates")] pub scheduling_gates: Option>, - /// SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. + /// SecurityContext holds pod-level security attributes and common container settings. + /// Optional: Defaults to empty. See type description for default values of each field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, - /// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. + /// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + /// Deprecated: Use serviceAccountName instead. #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccount")] pub service_account: Option, - /// ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + /// ServiceAccountName is the name of the ServiceAccount to use to run this pod. + /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountName")] pub service_account_name: Option, - /// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + /// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + /// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + /// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + /// If a pod does not have FQDN, this has no effect. + /// Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "setHostnameAsFQDN")] pub set_hostname_as_fqdn: Option, - /// Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. + /// Share a single process namespace between all of the containers in a pod. + /// When this is set containers will be able to view and signal processes from other containers + /// in the same pod, and the first process in each container will not be assigned PID 1. + /// HostPID and ShareProcessNamespace cannot both be set. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "shareProcessNamespace")] pub share_process_namespace: Option, - /// If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domainname at all. 
+ /// If specified, the fully qualified Pod hostname will be "...svc.". + /// If not specified, the pod will not have a domainname at all. #[serde(default, skip_serializing_if = "Option::is_none")] pub subdomain: Option, - /// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + /// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// If this value is nil, the default grace period will be used instead. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// Defaults to 30 seconds. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, /// If specified, the pod's tolerations. #[serde(default, skip_serializing_if = "Option::is_none")] pub tolerations: Option>, - /// TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + /// TopologySpreadConstraints describes how a group of pods ought to spread across topology + /// domains. Scheduler will schedule pods in a way which abides by the constraints. + /// All topologySpreadConstraints are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologySpreadConstraints")] pub topology_spread_constraints: Option>, - /// List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes + /// List of volumes that can be mounted by containers belonging to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes #[serde(default, skip_serializing_if = "Option::is_none")] pub volumes: Option>, } @@ -376,15 +701,28 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinity { /// Describes node affinity scheduling rules for the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node matches the corresponding matchExpressions; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + /// If the affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to an update), the system + /// may or may not try to eventually evict the pod from its node. #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option, } -/// An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +/// An empty preferred scheduling term matches all objects with implicit weight 0 +/// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution { /// A node selector term, associated with the corresponding weight. @@ -404,31 +742,47 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityPrefe pub match_fields: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. 
+    /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
     pub operator: String,
-    /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+    /// An array of string values. If the operator is In or NotIn,
+    /// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+    /// the values array must be empty. If the operator is Gt or Lt, the values
+    /// array must have a single element, which will be interpreted as an integer.
+    /// This array is replaced during a strategic merge patch.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub values: Option>,
 }

-/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+/// A node selector requirement is a selector that contains values, a key, and an operator
+/// that relates the key and values.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields {
     /// The label key that the selector applies to.
     pub key: String,
-    /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+    /// Represents a key's relationship to a set of values.
+    /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
     pub operator: String,
-    /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+    /// An array of string values. If the operator is In or NotIn,
+    /// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+    /// the values array must be empty. If the operator is Gt or Lt, the values
+    /// array must have a single element, which will be interpreted as an integer.
+    /// This array is replaced during a strategic merge patch.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub values: Option>,
 }

-/// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+/// If the affinity requirements specified by this field are not met at
+/// scheduling time, the pod will not be scheduled onto the node.
+/// If the affinity requirements specified by this field cease to be met
+/// at some point during pod execution (e.g. due to an update), the system
+/// may or may not try to eventually evict the pod from its node.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution {
     /// Required. A list of node selector terms. The terms are ORed.
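// A minimal usage sketch for the node-selector structs above. It assumes the
// generic parameters elided by the rendered diff ("Option>") read
// Option<Vec<String>> for `values`, that serde_json is available, and that this
// code lives alongside the generated module; the disktype/ssd labels are purely
// illustrative.
use std::error::Error;

// Shorter alias for the generated type defined above; the full name is unchanged.
type PreferredMatchExpression =
    JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions;

fn main() -> Result<(), Box<dyn Error>> {
    // "Prefer nodes whose disktype label is ssd", expressed as a single
    // matchExpressions entry using the In operator.
    let requirement = PreferredMatchExpression {
        key: "disktype".to_string(),
        operator: "In".to_string(),
        values: Some(vec!["ssd".to_string()]),
    };

    // None of these fields carry a serde rename, so the wire form matches the
    // CRD schema directly; this should print:
    // {"key":"disktype","operator":"In","values":["ssd"]}
    println!("{}", serde_json::to_string(&requirement)?);
    Ok(())
}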
@@ -436,7 +790,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequi pub node_selector_terms: Vec, } -/// A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +/// A null or empty node selector term matches no objects. The requirements of +/// them are ANDed. +/// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms { /// A list of node selector requirements by node's labels. @@ -447,26 +803,38 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequi pub match_fields: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -474,10 +842,24 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityNodeAffinityRequi /// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + /// If the affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to a pod label update), the + /// system may or may not try to eventually evict the pod from its node. + /// When there are multiple elements, the lists of nodes corresponding to each + /// podAffinityTerm are intersected, i.e. all terms must be satisfied. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option>, } @@ -488,144 +870,244 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPrefer /// Required. A pod affinity term, associated with the corresponding weight. #[serde(rename = "podAffinityTerm")] pub pod_affinity_term: JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, - /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + /// weight associated with matching the corresponding podAffinityTerm, + /// in the range 1-100. pub weight: i32, } /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. 
Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. + /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +/// Defines a set of pods (namely those matching the labelSelector +/// relative to the given namespace(s)) that this pod should be +/// co-located (affinity) or not co-located (anti-affinity) with, +/// where co-located is defined as running on a node whose value of +/// the label with key matches that of any node on which +/// a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. 
+ /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. 
pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -633,10 +1115,24 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAffinityRequir /// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the anti-affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling anti-affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + /// If the anti-affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the anti-affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to a pod label update), the + /// system may or may not try to eventually evict the pod from its node. + /// When there are multiple elements, the lists of nodes corresponding to each + /// podAffinityTerm are intersected, i.e. all terms must be satisfied. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option>, } @@ -647,144 +1143,244 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPr /// Required. A pod affinity term, associated with the corresponding weight. #[serde(rename = "podAffinityTerm")] pub pod_affinity_term: JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, - /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + /// weight associated with matching the corresponding podAffinityTerm, + /// in the range 1-100. pub weight: i32, } /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. 
Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. + /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +/// Defines a set of pods (namely those matching the labelSelector +/// relative to the given namespace(s)) that this pod should be +/// co-located (affinity) or not co-located (anti-affinity) with, +/// where co-located is defined as running on a node whose value of +/// the label with key matches that of any node on which +/// a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. 
+ /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. 
pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -792,75 +1388,166 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecAffinityPodAntiAffinityRe /// A single application container that you want to run within a pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainers { - /// Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Arguments to the entrypoint. + /// The container image's CMD is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub args: Option>, - /// Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Entrypoint array. Not executed within a shell. + /// The container image's ENTRYPOINT is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. 
+ /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, - /// List of environment variables to set in the container. Cannot be updated. + /// List of environment variables to set in the container. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub env: Option>, - /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + /// List of sources to populate environment variables in the container. + /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys + /// will be reported as an event when the container is starting. When a key exists in multiple + /// sources, the value associated with the last source will take precedence. + /// Values defined by an Env with a duplicate key will take precedence. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, - /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + /// Container image name. + /// More info: https://kubernetes.io/docs/concepts/containers/images + /// This field is optional to allow higher level config management to default or override + /// container images in workload controllers like Deployments and StatefulSets. #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + /// Image pull policy. + /// One of Always, Never, IfNotPresent. + /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")] pub image_pull_policy: Option, - /// Actions that the management system should take in response to container lifecycle events. Cannot be updated. + /// Actions that the management system should take in response to container lifecycle events. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub lifecycle: Option, - /// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container liveness. + /// Container will be restarted if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] pub liveness_probe: Option, - /// Name of the container specified as a DNS_LABEL. 
Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + /// Name of the container specified as a DNS_LABEL. + /// Each container in a pod must have a unique name (DNS_LABEL). + /// Cannot be updated. pub name: String, - /// List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + /// List of ports to expose from the container. Not specifying a port here + /// DOES NOT prevent that port from being exposed. Any port which is + /// listening on the default "0.0.0.0" address inside a container will be + /// accessible from the network. + /// Modifying this array with strategic merge patch may corrupt the data. + /// For more information See https://github.com/kubernetes/kubernetes/issues/108255. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub ports: Option>, - /// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container service readiness. + /// Container will be removed from service endpoints if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessProbe")] pub readiness_probe: Option, /// Resources resize policy for the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")] pub resize_policy: Option>, - /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Compute Resources required by this container. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, - /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + /// RestartPolicy defines the restart behavior of individual containers in a pod. 
+ /// This field may only be set for init containers, and the only allowed value is "Always". + /// For non-init containers or when this field is not specified, + /// the restart behavior is defined by the Pod's restart policy and the container type. + /// Setting the RestartPolicy as "Always" for the init container will have the following effect: + /// this init container will be continually restarted on + /// exit until all regular containers have terminated. Once all regular + /// containers have completed, all init containers with restartPolicy "Always" + /// will be shut down. This lifecycle differs from normal init containers and + /// is often referred to as a "sidecar" container. Although this init + /// container still starts in the init container sequence, it does not wait + /// for the container to complete before proceeding to the next init + /// container. Instead, the next init container starts immediately after this + /// init container is started, or after any startupProbe has successfully + /// completed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + /// SecurityContext defines the security options the container should be run with. + /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, - /// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// StartupProbe indicates that the Pod has successfully initialized. + /// If specified, no other probes are executed until this completes successfully. + /// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + /// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + /// when it might take a long time to load data or warm a cache, than during steady-state operation. + /// This cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] pub startup_probe: Option, - /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + /// Whether this container should allocate a buffer for stdin in the container runtime. If this + /// is not set, reads from stdin in the container will always result in EOF. + /// Default is false. 
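The reflowed restartPolicy comment above describes the sidecar-style init container behavior. A small illustrative sketch, assuming serde_json and using hypothetical container names and images, of the pod spec shape that comment describes:

use serde_json::json;

fn main() {
    // An init container with restartPolicy "Always" keeps running alongside the
    // regular containers (sidecar behavior) instead of blocking until it exits.
    let pod_spec_fragment = json!({
        "initContainers": [{
            "name": "log-forwarder",                       // hypothetical sidecar
            "image": "registry.example/forwarder:latest",  // hypothetical image
            "restartPolicy": "Always"
        }],
        "containers": [{
            "name": "main",
            "image": "registry.example/app:latest"
        }]
    });
    println!("{}", serde_json::to_string_pretty(&pod_spec_fragment).unwrap());
}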
#[serde(default, skip_serializing_if = "Option::is_none")] pub stdin: Option, - /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + /// Whether the container runtime should close the stdin channel after it has been opened by + /// a single attach. When stdin is true the stdin stream will remain open across multiple attach + /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, + /// at which time stdin is closed and remains closed until the container is restarted. If this + /// flag is false, a container processes that reads from stdin will never receive an EOF. + /// Default is false #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")] pub stdin_once: Option, - /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + /// Optional: Path at which the file to which the container's termination message + /// will be written is mounted into the container's filesystem. + /// Message written is intended to be brief final status, such as an assertion failure message. + /// Will be truncated by the node if greater than 4096 bytes. The total message length across + /// all containers will be limited to 12kb. + /// Defaults to /dev/termination-log. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")] pub termination_message_path: Option, - /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + /// Indicate how the termination message should be populated. File will use the contents of + /// terminationMessagePath to populate the container status message on both success and failure. + /// FallbackToLogsOnError will use the last chunk of container log output if the termination + /// message file is empty and the container exited with an error. + /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + /// Defaults to File. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")] pub termination_message_policy: Option, - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false. + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub tty: Option, /// volumeDevices is the list of block devices to be used by the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")] pub volume_devices: Option>, - /// Pod volumes to mount into the container's filesystem. Cannot be updated. + /// Pod volumes to mount into the container's filesystem. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] pub volume_mounts: Option>, - /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + /// Container's working directory. + /// If not specified, the container runtime's default will be used, which + /// might be configured in the container image. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")] pub working_dir: Option, } @@ -870,7 +1557,15 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainers { pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnv { /// Name of the environment variable. Must be a C_IDENTIFIER. pub name: String, - /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + /// Variable references $(VAR_NAME) are expanded + /// using the previously defined environment variables in the container and + /// any service environment variables. If a variable cannot be resolved, + /// the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + /// Escaped references will never be expanded, regardless of whether the variable + /// exists or not. + /// Defaults to "". #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Source for the environment variable's value. Cannot be used if value is not empty. @@ -884,10 +1579,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFrom { /// Selects a key of a ConfigMap. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")] pub config_map_key_ref: Option, - /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
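The fieldRef comment above (and the resourceFieldRef comment just below) describe the downward-API sources an env var can draw from. A hedged sketch, assuming serde_json and hypothetical variable names, of the env entries those selectors correspond to:

use serde_json::json;

fn main() {
    // POD_NAME comes from the pod's own metadata; CPU_LIMIT comes from the
    // container's resource limits, scaled to millicores via the divisor.
    let env = json!([
        { "name": "POD_NAME",
          "valueFrom": { "fieldRef": { "fieldPath": "metadata.name" } } },
        { "name": "CPU_LIMIT",
          "valueFrom": { "resourceFieldRef": { "resource": "limits.cpu", "divisor": "1m" } } }
    ]);
    println!("{}", serde_json::to_string_pretty(&env).unwrap());
}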
#[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, /// Selects a key of a secret in the pod's namespace @@ -900,7 +1597,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFrom { pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromConfigMapKeyRef { /// The key to select. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap or its key must be defined @@ -908,7 +1607,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromCon pub optional: Option, } -/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. +/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, +/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromFieldRef { /// Version of the schema the FieldPath is written in terms of, defaults to "v1". @@ -919,7 +1619,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromFie pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -937,7 +1638,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromRes pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvValueFromSecretKeyRef { /// The key of the secret to select from. Must be a valid secret key. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. 
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret or its key must be defined @@ -962,7 +1665,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvFrom { /// The ConfigMap to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvFromConfigMapRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap must be defined @@ -973,7 +1678,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvFromConfigMa /// The Secret to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvFromSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret must be defined @@ -981,18 +1688,33 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersEnvFromSecretRe pub optional: Option, } -/// Actions that the management system should take in response to container lifecycle events. Cannot be updated. +/// Actions that the management system should take in response to container lifecycle events. +/// Cannot be updated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecycle { - /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PostStart is called immediately after a container is created. If the handler fails, + /// the container is terminated and restarted according to its restart policy. + /// Other management of the container blocks until the hook completes. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")] pub post_start: Option, - /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. 
Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PreStop is called immediately before a container is terminated due to an + /// API request or management event such as liveness/startup probe failure, + /// preemption, resource contention, etc. The handler is not called if the + /// container crashes or exits. The Pod's termination grace period countdown begins before the + /// PreStop hook is executed. Regardless of the outcome of the handler, the + /// container will eventually terminate within the Pod's termination grace + /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes + /// or until the termination grace period is reached. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")] pub pre_stop: Option, } -/// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PostStart is called immediately after a container is created. If the handler fails, +/// the container is terminated and restarted according to its restart policy. +/// Other management of the container blocks until the hook completes. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostStart { /// Exec specifies the action to take. @@ -1004,7 +1726,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1012,7 +1736,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1020,7 +1748,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1029,9 +1758,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1039,7 +1771,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1052,17 +1785,29 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostSt pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. 
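The lifecycle comments above note that the exec handler is not run in a shell and that tcpSocket is deprecated as a lifecycle handler. A small sketch, assuming serde_json and hypothetical paths and commands, of a postStart/preStop pair consistent with those comments:

use serde_json::json;

fn main() {
    // exec commands are not run in a shell, so shell syntax needs an explicit shell;
    // httpGet hits the container on the given port and path after it starts.
    let lifecycle = json!({
        "postStart": {
            "httpGet": { "path": "/warmup", "port": 8080, "scheme": "HTTP" }
        },
        "preStop": {
            "exec": { "command": ["/bin/sh", "-c", "nginx -s quit; sleep 5"] }
        }
    });
    println!("{}", serde_json::to_string_pretty(&lifecycle).unwrap());
}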
#[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreStop { /// Exec specifies the action to take. @@ -1074,7 +1819,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1082,7 +1829,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreStopExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1090,7 +1841,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreStopHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1099,9 +1851,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1109,7 +1864,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreStopHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1122,23 +1878,31 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreSto pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLifecyclePreStopTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container liveness. +/// Container will be restarted if the probe fails. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1147,22 +1911,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. 
The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1170,7 +1948,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1180,8 +1962,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeEx pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. 
+ /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1189,7 +1974,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeGr /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1198,9 +1984,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeHt /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1208,7 +1997,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeHt /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1220,37 +2010,50 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeTc /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerPort represents a network port in a single container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersPorts { - /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + /// Number of port to expose on the pod's IP address. + /// This must be a valid port number, 0 < x < 65536. #[serde(rename = "containerPort")] pub container_port: i32, /// What host IP to bind the external port to. 
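The hunks above only rewrap the probe action comments; the rule itself is unchanged: an HTTP or TCP probe target takes either a port number in the range 1 to 65535 or an IANA_SVC_NAME. A hedged sketch of how that looks through the generated bindings (the module path kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets and the serde_yaml dependency are assumptions, while the struct and field names are taken from this file), showing both forms landing in the same IntOrString-typed port field:

// Illustrative sketch only; the module path is an assumption, not part of this patch.
use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersLivenessProbeHttpGet as HttpGetAction;

fn main() -> Result<(), serde_yaml::Error> {
    // Numeric port: must be in the range 1 to 65535.
    let by_number: HttpGetAction = serde_yaml::from_str(r#"
        path: /healthz
        port: 8080
    "#)?;
    // Named port: must be an IANA_SVC_NAME declared on the container.
    let by_name: HttpGetAction = serde_yaml::from_str(r#"
        path: /healthz
        port: metrics
    "#)?;
    // Both deserialize into the same struct; only the IntOrString variant differs.
    assert_eq!(by_number.path, by_name.path);
    Ok(())
}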
#[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")] pub host_ip: Option, - /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + /// Number of port to expose on the host. + /// If specified, this must be a valid port number, 0 < x < 65536. + /// If HostNetwork is specified, this must match ContainerPort. + /// Most containers do not need this. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")] pub host_port: Option, - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + /// named port in a pod must have a unique name. Name for the port that can be + /// referred to by services. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + /// Protocol for port. Must be UDP, TCP, or SCTP. + /// Defaults to "TCP". #[serde(default, skip_serializing_if = "Option::is_none")] pub protocol: Option, } -/// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container service readiness. +/// Container will be removed from service endpoints if the probe fails. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1259,22 +2062,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbe /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. 
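The readiness-probe fields above carry the upstream defaults (periodSeconds 10, failureThreshold 3, timeoutSeconds 1) in their comments only; the generated struct does not apply them. A small sketch under the same assumed module path and serde_yaml dependency as the previous snippet:

use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbe as ReadinessProbe;

fn main() -> Result<(), serde_yaml::Error> {
    let probe: ReadinessProbe = serde_yaml::from_str(r#"
        httpGet:
          path: /ready
          port: 8080
        initialDelaySeconds: 5
        periodSeconds: 10
    "#)?;
    // Omitted fields stay None; the documented defaults (failureThreshold 3,
    // timeoutSeconds 1, ...) are filled in by the API server, not by these structs.
    assert_eq!(probe.period_seconds, Some(10));
    assert_eq!(probe.failure_threshold, None);
    Ok(())
}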
#[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1282,7 +2099,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbe /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1292,8 +2113,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeE pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1301,7 +2125,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeG /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1310,9 +2135,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeH /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1320,7 +2148,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeH /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. 
+ /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1332,33 +2161,49 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersReadinessProbeT /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerResizePolicy represents resource resize policy for the container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersResizePolicy { - /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + /// Name of the resource to which this resource resize policy applies. + /// Supported values: cpu, memory. #[serde(rename = "resourceName")] pub resource_name: String, - /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + /// Restart policy to apply when specified resource is resized. + /// If not specified, it defaults to NotRequired. #[serde(rename = "restartPolicy")] pub restart_policy: String, } -/// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +/// Compute Resources required by this container. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. + /// Claims lists the names of resources, defined in spec.resourceClaims, + /// that are used by this container. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. It can only be set for containers. #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option>, - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. 
+ /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -1366,49 +2211,95 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersResources { /// ResourceClaim references one entry in PodSpec.ResourceClaims. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + /// Name must match the name of one entry in pod.spec.resourceClaims of + /// the Pod where this field is used. It makes that resource available + /// inside a container. pub name: String, } -/// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +/// SecurityContext defines the security options the container should be run with. +/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. +/// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContext { - /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + /// AllowPrivilegeEscalation controls whether a process can gain more + /// privileges than its parent process. This bool directly controls if + /// the no_new_privs flag will be set on the container process. + /// AllowPrivilegeEscalation is true always when the container is: + /// 1) run as Privileged + /// 2) has CAP_SYS_ADMIN + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] pub allow_privilege_escalation: Option, - /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + /// The capabilities to add/drop when running containers. + /// Defaults to the default set of capabilities granted by the container runtime. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub capabilities: Option, - /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + /// Run container in privileged mode. + /// Processes in privileged containers are essentially equivalent to root on the host. + /// Defaults to false. + /// Note that this field cannot be set when spec.os.name is windows. 
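The resources comments above restate the upstream contract: requests default to limits when omitted and can never exceed them, and resource claims are gated behind the alpha DynamicResourceAllocation feature. A hedged sketch (assumed module path and serde_yaml dependency; the cpu/memory quantities are arbitrary) of filling the generated resources struct from manifest YAML:

use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersResources as ContainerResources;

fn main() -> Result<(), serde_yaml::Error> {
    let resources: ContainerResources = serde_yaml::from_str(r#"
        limits:
          cpu: 500m
          memory: 128Mi
        requests:
          cpu: 250m
          memory: 64Mi
    "#)?;
    assert!(resources.limits.is_some());
    assert!(resources.requests.is_some());
    // The alpha `claims` field is left unset here.
    assert!(resources.claims.is_none());
    Ok(())
}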
#[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, - /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + /// procMount denotes the type of proc mount to use for the containers. + /// The default is DefaultProcMount which uses the container runtime defaults for + /// readonly paths and masked paths. + /// This requires the ProcMountType feature flag to be enabled. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] pub proc_mount: Option, - /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + /// Whether this container has a read-only root filesystem. + /// Default is false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] pub read_only_root_filesystem: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. 
If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to the container. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by this container. If seccomp options are + /// provided at both the pod & container level, the container options + /// override the pod options. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options from the PodSecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. +/// The capabilities to add/drop when running containers. +/// Defaults to the default set of capabilities granted by the container runtime. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContextCapabilities { /// Added capabilities @@ -1419,7 +2310,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContext pub drop: Option>, } -/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to the container. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in PodSecurityContext. If set in both SecurityContext and +/// PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -1436,42 +2331,71 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContext pub user: Option, } -/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by this container. If seccomp options are +/// provided at both the pod & container level, the container options +/// override the pod options. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options from the PodSecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. 
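Taken together, the security-context comments above cover the usual hardening knobs and note which of them cannot be set when spec.os.name is windows. A minimal sketch, under the same assumptions as the earlier snippets, of a restrictive container security context expressed against the generated struct:

use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContext as ContainerSecurityContext;

fn main() -> Result<(), serde_yaml::Error> {
    let ctx: ContainerSecurityContext = serde_yaml::from_str(r#"
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        capabilities:
          drop: ["ALL"]
        seccompProfile:
          type: RuntimeDefault
    "#)?;
    assert_eq!(ctx.run_as_non_root, Some(true));
    assert_eq!(ctx.allow_privilege_escalation, Some(false));
    Ok(())
}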
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// StartupProbe indicates that the Pod has successfully initialized. +/// If specified, no other probes are executed until this completes successfully. +/// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. +/// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, +/// when it might take a long time to load data or warm a cache, than during steady-state operation. +/// This cannot be updated. 
+/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1480,22 +2404,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1503,7 +2441,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1513,8 +2455,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeExe pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. 
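Per the startup-probe comment above, no other probes run until the startup probe succeeds, so a slow-starting container gets up to failureThreshold * periodSeconds to come up before the liveness probe takes over. A sketch with the usual assumed module path and serde_yaml dependency:

use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbe as StartupProbe;

fn main() -> Result<(), serde_yaml::Error> {
    // Allows up to 30 * 10s = 300s of startup time.
    let probe: StartupProbe = serde_yaml::from_str(r#"
        httpGet:
          path: /healthz
          port: 8080
        failureThreshold: 30
        periodSeconds: 10
    "#)?;
    assert_eq!(probe.failure_threshold, Some(30));
    Ok(())
}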
#[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1522,7 +2467,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeGrp /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1531,9 +2477,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeHtt /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1541,7 +2490,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeHtt /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1553,7 +2503,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersStartupProbeTcp /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -1570,35 +2522,53 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersVolumeDevices { /// VolumeMount describes a mounting of a Volume within a container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecContainersVolumeMounts { - /// Path within the container at which the volume should be mounted. Must not contain ':'. + /// Path within the container at which the volume should be mounted. Must + /// not contain ':'. #[serde(rename = "mountPath")] pub mount_path: String, - /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. 
+ /// mountPropagation determines how mounts are propagated from the host + /// to container and the other way around. + /// When not set, MountPropagationNone is used. + /// This field is beta in 1.10. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] pub mount_propagation: Option, /// This must match the Name of a Volume. pub name: String, - /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + /// Mounted read-only if true, read-write otherwise (false or unspecified). + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + /// Path within the volume from which the container's volume should be mounted. + /// Defaults to "" (volume's root). #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] pub sub_path: Option, - /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + /// Expanded path within the volume from which the container's volume should be mounted. + /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + /// Defaults to "" (volume's root). + /// SubPathExpr and SubPath are mutually exclusive. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] pub sub_path_expr: Option, } -/// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. +/// Specifies the DNS parameters of a pod. +/// Parameters specified here will be merged to the generated DNS +/// configuration based on DNSPolicy. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecDnsConfig { - /// A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + /// A list of DNS name server IP addresses. + /// This will be appended to the base nameservers generated from DNSPolicy. + /// Duplicated nameservers will be removed. #[serde(default, skip_serializing_if = "Option::is_none")] pub nameservers: Option>, - /// A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + /// A list of DNS resolver options. + /// This will be merged with the base options generated from DNSPolicy. + /// Duplicated entries will be removed. Resolution options given in Options + /// will override those that appear in the base DNSPolicy. #[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option>, - /// A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + /// A list of DNS search domains for host-name lookup. + /// This will be appended to the base search paths generated from DNSPolicy. + /// Duplicated search paths will be removed. 
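The volume-mount comments above call out that subPath and subPathExpr are mutually exclusive and that readOnly defaults to false. A brief sketch of one mount entry (same assumed module path; the volume name and paths are made up):

use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::JobSetReplicatedJobsTemplateSpecTemplateSpecContainersVolumeMounts as VolumeMount;

fn main() -> Result<(), serde_yaml::Error> {
    let mount: VolumeMount = serde_yaml::from_str(r#"
        name: config
        mountPath: /etc/app
        readOnly: true
        subPath: app.yaml
    "#)?;
    assert_eq!(mount.read_only, Some(true));
    // subPathExpr must stay unset because subPath is used.
    assert_eq!(mount.sub_path_expr, None);
    Ok(())
}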
#[serde(default, skip_serializing_if = "Option::is_none")] pub searches: Option>, } @@ -1613,26 +2583,58 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecDnsConfigOptions { pub value: Option, } -/// An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. -/// To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. +/// An EphemeralContainer is a temporary container that you may add to an existing Pod for +/// user-initiated activities such as debugging. Ephemeral containers have no resource or +/// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +/// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +/// Pod to exceed its resource allocation. +/// +/// +/// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +/// Pod. Ephemeral containers may not be removed or restarted. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainers { - /// Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Arguments to the entrypoint. + /// The image's CMD is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub args: Option>, - /// Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Entrypoint array. Not executed within a shell. + /// The image's ENTRYPOINT is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, - /// List of environment variables to set in the container. Cannot be updated. + /// List of environment variables to set in the container. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub env: Option>, - /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + /// List of sources to populate environment variables in the container. + /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys + /// will be reported as an event when the container is starting. When a key exists in multiple + /// sources, the value associated with the last source will take precedence. + /// Values defined by an Env with a duplicate key will take precedence. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, - /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + /// Container image name. + /// More info: https://kubernetes.io/docs/concepts/containers/images #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + /// Image pull policy. + /// One of Always, Never, IfNotPresent. + /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")] pub image_pull_policy: Option, /// Lifecycle is not allowed for ephemeral containers. @@ -1641,7 +2643,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainers { /// Probes are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] pub liveness_probe: Option, - /// Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + /// Name of the ephemeral container specified as a DNS_LABEL. 
+ /// This name must be unique among all containers, init containers and ephemeral containers. pub name: String, /// Ports are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1652,44 +2655,79 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainers { /// Resources resize policy for the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")] pub resize_policy: Option>, - /// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + /// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + /// already allocated to the pod. #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, - /// Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers. + /// Restart policy for the container to manage the restart behavior of each + /// container within a pod. + /// This may only be set for init containers. You cannot set this field on + /// ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + /// Optional: SecurityContext defines the security options the ephemeral container should be run with. + /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, /// Probes are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] pub startup_probe: Option, - /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + /// Whether this container should allocate a buffer for stdin in the container runtime. If this + /// is not set, reads from stdin in the container will always result in EOF. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub stdin: Option, - /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + /// Whether the container runtime should close the stdin channel after it has been opened by + /// a single attach. When stdin is true the stdin stream will remain open across multiple attach + /// sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, + /// at which time stdin is closed and remains closed until the container is restarted. If this + /// flag is false, a container processes that reads from stdin will never receive an EOF. + /// Default is false #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")] pub stdin_once: Option, - /// If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. - /// The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. + /// If set, the name of the container from PodSpec that this ephemeral container targets. + /// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + /// If not set then the ephemeral container uses the namespaces configured in the Pod spec. + /// + /// + /// The container runtime must implement support for this feature. If the runtime does not + /// support namespace targeting then the result of setting this field is undefined. #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetContainerName")] pub target_container_name: Option, - /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + /// Optional: Path at which the file to which the container's termination message + /// will be written is mounted into the container's filesystem. + /// Message written is intended to be brief final status, such as an assertion failure message. + /// Will be truncated by the node if greater than 4096 bytes. The total message length across + /// all containers will be limited to 12kb. + /// Defaults to /dev/termination-log. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")] pub termination_message_path: Option, - /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + /// Indicate how the termination message should be populated. File will use the contents of + /// terminationMessagePath to populate the container status message on both success and failure. + /// FallbackToLogsOnError will use the last chunk of container log output if the termination + /// message file is empty and the container exited with an error. + /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + /// Defaults to File. + /// Cannot be updated. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")] pub termination_message_policy: Option, - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub tty: Option, /// volumeDevices is the list of block devices to be used by the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")] pub volume_devices: Option>, - /// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + /// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] pub volume_mounts: Option>, - /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + /// Container's working directory. + /// If not specified, the container runtime's default will be used, which + /// might be configured in the container image. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")] pub working_dir: Option, } @@ -1699,7 +2737,15 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainers { pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnv { /// Name of the environment variable. Must be a C_IDENTIFIER. pub name: String, - /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + /// Variable references $(VAR_NAME) are expanded + /// using the previously defined environment variables in the container and + /// any service environment variables. If a variable cannot be resolved, + /// the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + /// Escaped references will never be expanded, regardless of whether the variable + /// exists or not. + /// Defaults to "". #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Source for the environment variable's value. Cannot be used if value is not empty. @@ -1713,10 +2759,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvVal /// Selects a key of a ConfigMap. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")] pub config_map_key_ref: Option, - /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
+ /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
 #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")]
 pub field_ref: Option<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvValueFromFieldRef>,
- /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ /// Selects a resource of the container: only resources limits and requests
+ /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
 #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")]
 pub resource_field_ref: Option<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvValueFromResourceFieldRef>,
 /// Selects a key of a secret in the pod's namespace
@@ -1729,7 +2777,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvVal
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvValueFromConfigMapKeyRef {
 /// The key to select.
 pub key: String,
- /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+ /// Name of the referent.
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ /// TODO: Add other useful fields. apiVersion, kind, uid?
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub name: Option<String>,
 /// Specify whether the ConfigMap or its key must be defined
@@ -1737,7 +2787,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvVal
 pub optional: Option<bool>,
 }
-/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvValueFromFieldRef {
 /// Version of the schema the FieldPath is written in terms of, defaults to "v1".
@@ -1748,7 +2799,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvVal
 pub field_path: String,
 }
-/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+/// Selects a resource of the container: only resources limits and requests
+/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvValueFromResourceFieldRef {
 /// Container name: required for volumes, optional for env vars
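Illustrative sketch (not part of the generated diff): the `valueFrom`/`fieldRef` shape documented above can be exercised through these generated types roughly as follows, assuming the surrounding JobSet module is in scope, that the fields not visible in this hunk carry the usual camelCase serde renames, and that serde_json is available.

use serde_json::json;

// Hypothetical helper: builds an env var whose value comes from the pod's
// metadata.name via the downward-API style fieldRef documented above. The JSON
// keys assume the camelCase serde renames used throughout this file.
fn pod_name_env() -> serde_json::Result<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnv> {
    serde_json::from_value(json!({
        "name": "POD_NAME",
        "valueFrom": {
            "fieldRef": { "fieldPath": "metadata.name" }
        }
    }))
}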
pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret or its key must be defined @@ -1791,7 +2845,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvFro /// The ConfigMap to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvFromConfigMapRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap must be defined @@ -1802,7 +2858,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvFro /// The Secret to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvFromSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret must be defined @@ -1813,15 +2871,29 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersEnvFro /// Lifecycle is not allowed for ephemeral containers. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecycle { - /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PostStart is called immediately after a container is created. If the handler fails, + /// the container is terminated and restarted according to its restart policy. + /// Other management of the container blocks until the hook completes. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")] pub post_start: Option, - /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. 
Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PreStop is called immediately before a container is terminated due to an + /// API request or management event such as liveness/startup probe failure, + /// preemption, resource contention, etc. The handler is not called if the + /// container crashes or exits. The Pod's termination grace period countdown begins before the + /// PreStop hook is executed. Regardless of the outcome of the handler, the + /// container will eventually terminate within the Pod's termination grace + /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes + /// or until the termination grace period is reached. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")] pub pre_stop: Option, } -/// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PostStart is called immediately after a container is created. If the handler fails, +/// the container is terminated and restarted according to its restart policy. +/// Other management of the container blocks until the hook completes. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePostStart { /// Exec specifies the action to take. @@ -1833,7 +2905,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1841,7 +2915,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1849,7 +2927,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1858,9 +2937,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1868,7 +2950,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1881,17 +2964,29 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePreStop { /// Exec specifies the action to take. @@ -1903,7 +2998,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1911,7 +3008,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePreStopExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1919,7 +3020,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePreStopHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1928,9 +3030,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1938,7 +3043,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1951,13 +3057,17 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecy pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecyclePreStopTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -1967,7 +3077,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1976,22 +3087,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
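Illustrative sketch (not part of the generated diff): the lifecycle handler shapes above could be populated as below, assuming the generated module and serde_json are available. As the doc comments note, lifecycle is rejected for ephemeral containers, so this shape is mainly relevant to the regular container types that mirror it.

use serde_json::json;

// Hypothetical example: a postStart exec hook plus a preStop HTTP GET.
// Keys follow the camelCase serde renames used throughout this file;
// `port` accepts either a number or a named port via IntOrString.
fn lifecycle_sketch() -> serde_json::Result<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLifecycle> {
    serde_json::from_value(json!({
        "postStart": { "exec": { "command": ["/bin/sh", "-c", "echo started"] } },
        "preStop":   { "httpGet": { "path": "/shutdown", "port": 8080, "scheme": "HTTP" } }
    }))
}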
+ /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1999,7 +3124,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivenessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2009,8 +3138,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivenessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. 
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub service: Option<String>,
 }
@@ -2018,7 +3150,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene
 /// HTTPGet specifies the http request to perform.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivenessProbeHttpGet {
- /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+ /// Host name to connect to, defaults to the pod IP. You probably want to set
+ /// "Host" in httpHeaders instead.
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub host: Option<String>,
 /// Custom headers to set in the request. HTTP allows repeated headers.
@@ -2027,9 +3160,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene
 /// Path to access on the HTTP server.
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub path: Option<String>,
- /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ /// Name or number of the port to access on the container.
+ /// Number must be in the range 1 to 65535.
+ /// Name must be an IANA_SVC_NAME.
 pub port: IntOrString,
- /// Scheme to use for connecting to the host. Defaults to HTTP.
+ /// Scheme to use for connecting to the host.
+ /// Defaults to HTTP.
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub scheme: Option<String>,
 }
@@ -2037,7 +3173,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene
 /// HTTPHeader describes a custom header to be used in HTTP probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivenessProbeHttpGetHttpHeaders {
- /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ /// The header field name.
+ /// This will be canonicalized upon output, so case-variant names will be understood as the same header.
 pub name: String,
 /// The header field value
 pub value: String,
@@ -2049,26 +3186,35 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivene
 /// Optional: Host name to connect to, defaults to the pod IP.
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub host: Option<String>,
- /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ /// Number or name of the port to access on the container.
+ /// Number must be in the range 1 to 65535.
+ /// Name must be an IANA_SVC_NAME.
 pub port: IntOrString,
 }
 /// ContainerPort represents a network port in a single container.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersPorts {
- /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
+ /// Number of port to expose on the pod's IP address.
+ /// This must be a valid port number, 0 < x < 65536.
 #[serde(rename = "containerPort")]
 pub container_port: i32,
 /// What host IP to bind the external port to.
 #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")]
 pub host_ip: Option<String>,
- /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
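Illustrative sketch (not part of the generated diff): the HTTP GET action documented above takes a path, an IntOrString port, and an optional scheme. A minimal instance, assuming the generated module and serde_json are in scope; probes themselves are rejected for ephemeral containers, as noted above.

use serde_json::json;

// Hypothetical example of the httpGet action shape; only fields visible in
// this hunk are set, everything else falls back to its #[serde(default)].
fn http_get_sketch() -> serde_json::Result<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersLivenessProbeHttpGet> {
    serde_json::from_value(json!({
        "path": "/healthz",
        "port": 8080,
        "scheme": "HTTP"
    }))
}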
+ /// Number of port to expose on the host. + /// If specified, this must be a valid port number, 0 < x < 65536. + /// If HostNetwork is specified, this must match ContainerPort. + /// Most containers do not need this. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")] pub host_port: Option, - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + /// named port in a pod must have a unique name. Name for the port that can be + /// referred to by services. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + /// Protocol for port. Must be UDP, TCP, or SCTP. + /// Defaults to "TCP". #[serde(default, skip_serializing_if = "Option::is_none")] pub protocol: Option, } @@ -2079,7 +3225,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -2088,22 +3235,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -2111,7 +3272,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2121,8 +3286,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -2130,7 +3298,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2139,9 +3308,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2149,7 +3321,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2161,33 +3334,48 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersReadin /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. 
 pub port: IntOrString,
 }
 /// ContainerResizePolicy represents resource resize policy for the container.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResizePolicy {
- /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.
+ /// Name of the resource to which this resource resize policy applies.
+ /// Supported values: cpu, memory.
 #[serde(rename = "resourceName")]
 pub resource_name: String,
- /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.
+ /// Restart policy to apply when specified resource is resized.
+ /// If not specified, it defaults to NotRequired.
 #[serde(rename = "restartPolicy")]
 pub restart_policy: String,
 }
-/// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
+/// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+/// already allocated to the pod.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResources {
- /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.
- /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
- /// This field is immutable. It can only be set for containers.
+ /// Claims lists the names of resources, defined in spec.resourceClaims,
+ /// that are used by this container.
+ ///
+ ///
+ /// This is an alpha field and requires enabling the
+ /// DynamicResourceAllocation feature gate.
+ ///
+ ///
+ /// This field is immutable. It can only be set for containers.
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub claims: Option<Vec<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResourcesClaims>>,
- /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ /// Limits describes the maximum amount of compute resources allowed.
+ /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub limits: Option<BTreeMap<String, IntOrString>>,
- /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ /// Requests describes the minimum amount of compute resources required.
+ /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ /// otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
 #[serde(default, skip_serializing_if = "Option::is_none")]
 pub requests: Option<BTreeMap<String, IntOrString>>,
 }
@@ -2195,49 +3383,94 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResour
 /// ResourceClaim references one entry in PodSpec.ResourceClaims.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResourcesClaims {
- /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.
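Illustrative sketch (not part of the generated diff): assuming the limits/requests maps hold IntOrString quantities, consistent with the IntOrString usage elsewhere in this file, both "500m"-style strings and plain integers deserialize. The sketch also assumes the generated module and serde_json are available.

use serde_json::json;

// Hypothetical example of the resources shape; ephemeral containers reject
// resources, so this mainly illustrates the shared shape described above.
fn resources_sketch() -> serde_json::Result<JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersResources> {
    serde_json::from_value(json!({
        "requests": { "cpu": "250m", "memory": "64Mi" },
        "limits":   { "cpu": "500m", "memory": "128Mi" }
    }))
}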
+    /// Name must match the name of one entry in pod.spec.resourceClaims of
+    /// the Pod where this field is used. It makes that resource available
+    /// inside a container.
     pub name: String,
 }
-/// Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+/// Optional: SecurityContext defines the security options the ephemeral container should be run with.
+/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecurityContext {
-    /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.
+    /// AllowPrivilegeEscalation controls whether a process can gain more
+    /// privileges than its parent process. This bool directly controls if
+    /// the no_new_privs flag will be set on the container process.
+    /// AllowPrivilegeEscalation is true always when the container is:
+    /// 1) run as Privileged
+    /// 2) has CAP_SYS_ADMIN
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")]
     pub allow_privilege_escalation: Option,
-    /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.
+    /// The capabilities to add/drop when running containers.
+    /// Defaults to the default set of capabilities granted by the container runtime.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub capabilities: Option,
-    /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.
+    /// Run container in privileged mode.
+    /// Processes in privileged containers are essentially equivalent to root on the host.
+    /// Defaults to false.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub privileged: Option,
-    /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.
+    /// procMount denotes the type of proc mount to use for the containers.
+    /// The default is DefaultProcMount which uses the container runtime defaults for
+    /// readonly paths and masked paths.
+    /// This requires the ProcMountType feature flag to be enabled.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")]
     pub proc_mount: Option,
-    /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.
+    /// Whether this container has a read-only root filesystem.
+    /// Default is false.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")]
     pub read_only_root_filesystem: Option,
-    /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
+    /// The GID to run the entrypoint of the container process.
+    /// Uses runtime default if unset.
+    /// May also be set in PodSecurityContext. If set in both SecurityContext and
+    /// PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")]
     pub run_as_group: Option,
-    /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// Indicates that the container must run as a non-root user.
+    /// If true, the Kubelet will validate the image at runtime to ensure that it
+    /// does not run as UID 0 (root) and fail to start the container if it does.
+    /// If unset or false, no such validation will be performed.
+    /// May also be set in PodSecurityContext. If set in both SecurityContext and
+    /// PodSecurityContext, the value specified in SecurityContext takes precedence.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")]
     pub run_as_non_root: Option,
-    /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
+    /// The UID to run the entrypoint of the container process.
+    /// Defaults to user specified in image metadata if unspecified.
+    /// May also be set in PodSecurityContext. If set in both SecurityContext and
+    /// PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")]
     pub run_as_user: Option,
-    /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
+    /// The SELinux context to be applied to the container.
+    /// If unspecified, the container runtime will allocate a random SELinux context for each
+    /// container. May also be set in PodSecurityContext. If set in both SecurityContext and
+    /// PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")]
     pub se_linux_options: Option,
-    /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.
+    /// The seccomp options to use by this container. If seccomp options are
+    /// provided at both the pod & container level, the container options
+    /// override the pod options.
+    /// Note that this field cannot be set when spec.os.name is windows.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")]
     pub seccomp_profile: Option,
-    /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.
+    /// The Windows specific settings applied to all containers.
+    /// If unspecified, the options from the PodSecurityContext will be used.
+    /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// Note that this field cannot be set when spec.os.name is linux.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")]
     pub windows_options: Option,
 }
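The security-context struct reconstructed above maps snake_case Rust fields back to Kubernetes' camelCase keys via serde renames and drops anything left unset. A rough sketch of that serialization behaviour, using a trimmed, hypothetical stand-in type rather than the generated struct itself (the generics elided above are assumed to be Option<bool> and Option<i64>, and serde with its derive feature plus serde_json are assumed to be available):

use serde::{Deserialize, Serialize};

// Trimmed, hypothetical mirror of the generated security-context type above.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct SecurityContextSketch {
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")]
    pub allow_privilege_escalation: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")]
    pub run_as_non_root: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")]
    pub run_as_user: Option<i64>,
}

fn main() {
    let ctx = SecurityContextSketch {
        allow_privilege_escalation: Some(false),
        run_as_non_root: Some(true),
        run_as_user: None, // omitted from the output by skip_serializing_if
    };
    // Prints {"allowPrivilegeEscalation":false,"runAsNonRoot":true}
    println!("{}", serde_json::to_string(&ctx).unwrap());
}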
-/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.
+/// The capabilities to add/drop when running containers.
+/// Defaults to the default set of capabilities granted by the container runtime.
+/// Note that this field cannot be set when spec.os.name is windows.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecurityContextCapabilities {
     /// Added capabilities
@@ -2248,7 +3481,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecuri
     pub drop: Option>,
 }
-/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.
+/// The SELinux context to be applied to the container.
+/// If unspecified, the container runtime will allocate a random SELinux context for each
+/// container. May also be set in PodSecurityContext. If set in both SecurityContext and
+/// PodSecurityContext, the value specified in SecurityContext takes precedence.
+/// Note that this field cannot be set when spec.os.name is windows.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecurityContextSeLinuxOptions {
     /// Level is SELinux level label that applies to the container.
@@ -2265,31 +3502,53 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecuri
     pub user: Option,
 }
-/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.
+/// The seccomp options to use by this container. If seccomp options are
+/// provided at both the pod & container level, the container options
+/// override the pod options.
+/// Note that this field cannot be set when spec.os.name is windows.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecurityContextSeccompProfile {
-    /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type.
+    /// localhostProfile indicates a profile defined in a file on the node should be used.
+    /// The profile must be preconfigured on the node to work.
+    /// Must be a descending path, relative to the kubelet's configured seccomp profile location.
+    /// Must be set if type is "Localhost". Must NOT be set for any other type.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")]
     pub localhost_profile: Option,
-    /// type indicates which kind of seccomp profile will be applied. Valid options are:
-    /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.
+    /// type indicates which kind of seccomp profile will be applied.
+    /// Valid options are:
+    ///
+    ///
+    /// Localhost - a profile defined in a file on the node should be used.
+    /// RuntimeDefault - the container runtime default profile should be used.
+    /// Unconfined - no profile should be applied.
     #[serde(rename = "type")]
     pub r#type: String,
 }
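The seccomp profile comments above encode an invariant: localhostProfile must be set when type is "Localhost" and must not be set otherwise. A small, hypothetical caller-side check illustrating that documented rule (the elided generic is assumed to be Option<String>; this helper is not part of the generated code):

// Returns true when the documented localhostProfile/type pairing is respected.
fn seccomp_profile_is_consistent(r#type: &str, localhost_profile: &Option<String>) -> bool {
    match r#type {
        "Localhost" => localhost_profile.is_some(),
        // RuntimeDefault, Unconfined, or anything else: no localhostProfile allowed.
        _ => localhost_profile.is_none(),
    }
}

fn main() {
    assert!(seccomp_profile_is_consistent("Localhost", &Some("profiles/audit.json".into())));
    assert!(seccomp_profile_is_consistent("RuntimeDefault", &None));
    assert!(!seccomp_profile_is_consistent("Unconfined", &Some("profiles/audit.json".into())));
}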
-/// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.
+/// The Windows specific settings applied to all containers.
+/// If unspecified, the options from the PodSecurityContext will be used.
+/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+/// Note that this field cannot be set when spec.os.name is linux.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersSecurityContextWindowsOptions {
-    /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
+    /// GMSACredentialSpec is where the GMSA admission webhook
+    /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+    /// GMSA credential spec named by the GMSACredentialSpecName field.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")]
     pub gmsa_credential_spec: Option,
     /// GMSACredentialSpecName is the name of the GMSA credential spec to use.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")]
     pub gmsa_credential_spec_name: Option,
-    /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.
+    /// HostProcess determines if a container should be run as a 'Host Process' container.
+    /// All of a Pod's containers must have the same effective HostProcess value
+    /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+    /// In addition, if HostProcess is true then HostNetwork must also be set to true.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")]
     pub host_process: Option,
-    /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+    /// The UserName in Windows to run the entrypoint of the container process.
+    /// Defaults to the user specified in image metadata if unspecified.
+    /// May also be set in PodSecurityContext. If set in both SecurityContext and
+    /// PodSecurityContext, the value specified in SecurityContext takes precedence.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")]
     pub run_as_user_name: Option,
 }
@@ -2300,7 +3559,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
     /// Exec specifies the action to take.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub exec: Option,
-    /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+    /// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+    /// Defaults to 3. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")]
     pub failure_threshold: Option,
     /// GRPC specifies an action involving a GRPC port.
@@ -2309,22 +3569,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
     /// HTTPGet specifies the http request to perform.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")]
     pub http_get: Option,
-    /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after the container has started before liveness probes are initiated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")]
     pub initial_delay_seconds: Option,
-    /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+    /// How often (in seconds) to perform the probe.
+    /// Default to 10 seconds. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")]
     pub period_seconds: Option,
-    /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+    /// Minimum consecutive successes for the probe to be considered successful after having failed.
+    /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")]
     pub success_threshold: Option,
     /// TCPSocket specifies an action involving a TCP port.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")]
     pub tcp_socket: Option,
-    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+    /// The grace period is the duration in seconds after the processes running in the pod are sent
+    /// a termination signal and the time when the processes are forcibly halted with a kill signal.
+    /// Set this value longer than the expected cleanup time for your process.
+    /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+    /// value overrides the value provided by the pod spec.
+    /// Value must be non-negative integer. The value zero indicates stop immediately via
+    /// the kill signal (no opportunity to shut down).
+    /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+    /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")]
     pub termination_grace_period_seconds: Option,
-    /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after which the probe times out.
+    /// Defaults to 1 second. Minimum value is 1.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")]
     pub timeout_seconds: Option,
 }
@@ -2332,7 +3606,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
 /// Exec specifies the action to take.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartupProbeExec {
-    /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+    /// Command is the command line to execute inside the container, the working directory for the
+    /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+    /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+    /// a shell, you need to explicitly call out to that shell.
+    /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub command: Option>,
 }
@@ -2342,8 +3620,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartupProbeGrpc {
     /// Port number of the gRPC service. Number must be in the range 1 to 65535.
     pub port: i32,
-    /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-    /// If this is not specified, the default behavior is defined by gRPC.
+    /// Service is the name of the service to place in the gRPC HealthCheckRequest
+    /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+    ///
+    ///
+    /// If this is not specified, the default behavior is defined by gRPC.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub service: Option,
 }
@@ -2351,7 +3632,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
 /// HTTPGet specifies the http request to perform.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartupProbeHttpGet {
-    /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+    /// Host name to connect to, defaults to the pod IP. You probably want to set
+    /// "Host" in httpHeaders instead.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
     /// Custom headers to set in the request. HTTP allows repeated headers.
@@ -2360,9 +3642,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
     /// Path to access on the HTTP server.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub path: Option,
-    /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Name or number of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
-    /// Scheme to use for connecting to the host. Defaults to HTTP.
+    /// Scheme to use for connecting to the host.
+    /// Defaults to HTTP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub scheme: Option,
 }
@@ -2370,7 +3655,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
 /// HTTPHeader describes a custom header to be used in HTTP probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartupProbeHttpGetHttpHeaders {
-    /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
+    /// The header field name.
+    /// This will be canonicalized upon output, so case-variant names will be understood as the same header.
     pub name: String,
     /// The header field value
     pub value: String,
@@ -2382,7 +3668,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersStartu
     /// Optional: Host name to connect to, defaults to the pod IP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
-    /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Number or name of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
 }
@@ -2399,26 +3687,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersVolume
 /// VolumeMount describes a mounting of a Volume within a container.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecEphemeralContainersVolumeMounts {
-    /// Path within the container at which the volume should be mounted. Must not contain ':'.
+    /// Path within the container at which the volume should be mounted. Must
+    /// not contain ':'.
     #[serde(rename = "mountPath")]
     pub mount_path: String,
-    /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+    /// mountPropagation determines how mounts are propagated from the host
+    /// to container and the other way around.
+    /// When not set, MountPropagationNone is used.
+    /// This field is beta in 1.10.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")]
     pub mount_propagation: Option,
     /// This must match the Name of a Volume.
     pub name: String,
-    /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+    /// Mounted read-only if true, read-write otherwise (false or unspecified).
+    /// Defaults to false.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")]
     pub read_only: Option,
-    /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+    /// Path within the volume from which the container's volume should be mounted.
+    /// Defaults to "" (volume's root).
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")]
     pub sub_path: Option,
-    /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
+    /// Expanded path within the volume from which the container's volume should be mounted.
+    /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+    /// Defaults to "" (volume's root).
+    /// SubPathExpr and SubPath are mutually exclusive.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")]
     pub sub_path_expr: Option,
 }
-/// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+/// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+/// pod's hosts file.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecHostAliases {
     /// Hostnames for the above IP address.
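Several fields above (the httpGet and tcpSocket ports) are typed as IntOrString because the Kubernetes API accepts either a port number in the range 1 to 65535 or an IANA_SVC_NAME. A simplified, hypothetical stand-in for that convention, modelled as an untagged serde enum (this is not the actual type the generated code imports; serde and serde_json are assumed to be available):

use serde::{Deserialize, Serialize};

// Hypothetical mirror of the numeric-or-named port convention used by IntOrString.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
enum PortSketch {
    Number(i32),
    Name(String),
}

fn main() {
    let by_number: PortSketch = serde_json::from_str("8080").unwrap();
    let by_name: PortSketch = serde_json::from_str("\"metrics\"").unwrap();
    assert_eq!(by_number, PortSketch::Number(8080));
    assert_eq!(by_name, PortSketch::Name("metrics".to_string()));
}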
@@ -2429,10 +3727,13 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecHostAliases {
     pub ip: Option,
 }
-/// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
+/// LocalObjectReference contains enough information to let you locate the
+/// referenced object inside the same namespace.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecImagePullSecrets {
-    /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    /// TODO: Add other useful fields. apiVersion, kind, uid?
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
 }
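The probe fields reconstructed above document defaults of failureThreshold 3 and periodSeconds 10. Under those semantics a probe is declared failed roughly initialDelaySeconds + failureThreshold * periodSeconds after the container starts, ignoring the per-attempt timeoutSeconds. A hypothetical helper illustrating that arithmetic (not part of the generated API):

fn approx_seconds_until_probe_fails(
    initial_delay_seconds: Option<i32>,
    failure_threshold: Option<i32>,
    period_seconds: Option<i32>,
) -> i32 {
    let delay = initial_delay_seconds.unwrap_or(0);
    let threshold = failure_threshold.unwrap_or(3); // documented default
    let period = period_seconds.unwrap_or(10); // documented default
    delay + threshold * period
}

fn main() {
    // All defaults: 0 + 3 * 10 = 30 seconds.
    assert_eq!(approx_seconds_until_probe_fails(None, None, None), 30);
    // Explicit settings: 15 + 5 * 20 = 115 seconds.
    assert_eq!(approx_seconds_until_probe_fails(Some(15), Some(5), Some(20)), 115);
}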
@@ -2440,75 +3741,166 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecImagePullSecrets {
 /// A single application container that you want to run within a pod.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainers {
-    /// Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+    /// Arguments to the entrypoint.
+    /// The container image's CMD is used if this is not provided.
+    /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+    /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+    /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+    /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+    /// of whether the variable exists or not. Cannot be updated.
+    /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub args: Option>,
-    /// Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+    /// Entrypoint array. Not executed within a shell.
+    /// The container image's ENTRYPOINT is used if this is not provided.
+    /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+    /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+    /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+    /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+    /// of whether the variable exists or not. Cannot be updated.
+    /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub command: Option>,
-    /// List of environment variables to set in the container. Cannot be updated.
+    /// List of environment variables to set in the container.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub env: Option>,
-    /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
+    /// List of sources to populate environment variables in the container.
+    /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+    /// will be reported as an event when the container is starting. When a key exists in multiple
+    /// sources, the value associated with the last source will take precedence.
+    /// Values defined by an Env with a duplicate key will take precedence.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")]
     pub env_from: Option>,
-    /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
+    /// Container image name.
+    /// More info: https://kubernetes.io/docs/concepts/containers/images
+    /// This field is optional to allow higher level config management to default or override
+    /// container images in workload controllers like Deployments and StatefulSets.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub image: Option,
-    /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+    /// Image pull policy.
+    /// One of Always, Never, IfNotPresent.
+    /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+    /// Cannot be updated.
+    /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")]
     pub image_pull_policy: Option,
-    /// Actions that the management system should take in response to container lifecycle events. Cannot be updated.
+    /// Actions that the management system should take in response to container lifecycle events.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub lifecycle: Option,
-    /// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Periodic probe of container liveness.
+    /// Container will be restarted if the probe fails.
+    /// Cannot be updated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")]
     pub liveness_probe: Option,
-    /// Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
+    /// Name of the container specified as a DNS_LABEL.
+    /// Each container in a pod must have a unique name (DNS_LABEL).
+    /// Cannot be updated.
     pub name: String,
-    /// List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.
+    /// List of ports to expose from the container. Not specifying a port here
+    /// DOES NOT prevent that port from being exposed. Any port which is
+    /// listening on the default "0.0.0.0" address inside a container will be
+    /// accessible from the network.
+    /// Modifying this array with strategic merge patch may corrupt the data.
+    /// For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub ports: Option>,
-    /// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Periodic probe of container service readiness.
+    /// Container will be removed from service endpoints if the probe fails.
+    /// Cannot be updated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessProbe")]
     pub readiness_probe: Option,
     /// Resources resize policy for the container.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")]
     pub resize_policy: Option>,
-    /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+    /// Compute Resources required by this container.
+    /// Cannot be updated.
+    /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub resources: Option,
-    /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.
+    /// RestartPolicy defines the restart behavior of individual containers in a pod.
+    /// This field may only be set for init containers, and the only allowed value is "Always".
+    /// For non-init containers or when this field is not specified,
+    /// the restart behavior is defined by the Pod's restart policy and the container type.
+    /// Setting the RestartPolicy as "Always" for the init container will have the following effect:
+    /// this init container will be continually restarted on
+    /// exit until all regular containers have terminated. Once all regular
+    /// containers have completed, all init containers with restartPolicy "Always"
+    /// will be shut down. This lifecycle differs from normal init containers and
+    /// is often referred to as a "sidecar" container. Although this init
+    /// container still starts in the init container sequence, it does not wait
+    /// for the container to complete before proceeding to the next init
+    /// container. Instead, the next init container starts immediately after this
+    /// init container is started, or after any startupProbe has successfully
+    /// completed.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")]
     pub restart_policy: Option,
-    /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+    /// SecurityContext defines the security options the container should be run with.
+    /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+    /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")]
     pub security_context: Option,
-    /// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// StartupProbe indicates that the Pod has successfully initialized.
+    /// If specified, no other probes are executed until this completes successfully.
+    /// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+    /// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+    /// when it might take a long time to load data or warm a cache, than during steady-state operation.
+    /// This cannot be updated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")]
     pub startup_probe: Option,
-    /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
+    /// Whether this container should allocate a buffer for stdin in the container runtime. If this
+    /// is not set, reads from stdin in the container will always result in EOF.
+    /// Default is false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub stdin: Option,
-    /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false
+    /// Whether the container runtime should close the stdin channel after it has been opened by
+    /// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+    /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+    /// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+    /// at which time stdin is closed and remains closed until the container is restarted. If this
+    /// flag is false, a container processes that reads from stdin will never receive an EOF.
+    /// Default is false
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")]
     pub stdin_once: Option,
-    /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
+    /// Optional: Path at which the file to which the container's termination message
+    /// will be written is mounted into the container's filesystem.
+    /// Message written is intended to be brief final status, such as an assertion failure message.
+    /// Will be truncated by the node if greater than 4096 bytes. The total message length across
+    /// all containers will be limited to 12kb.
+    /// Defaults to /dev/termination-log.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")]
     pub termination_message_path: Option,
-    /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
+    /// Indicate how the termination message should be populated. File will use the contents of
+    /// terminationMessagePath to populate the container status message on both success and failure.
+    /// FallbackToLogsOnError will use the last chunk of container log output if the termination
+    /// message file is empty and the container exited with an error.
+    /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+    /// Defaults to File.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")]
     pub termination_message_policy: Option,
-    /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
+    /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+    /// Default is false.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub tty: Option,
     /// volumeDevices is the list of block devices to be used by the container.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")]
     pub volume_devices: Option>,
-    /// Pod volumes to mount into the container's filesystem. Cannot be updated.
+    /// Pod volumes to mount into the container's filesystem.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")]
     pub volume_mounts: Option>,
-    /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
+    /// Container's working directory.
+    /// If not specified, the container runtime's default will be used, which
+    /// might be configured in the container image.
+    /// Cannot be updated.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")]
     pub working_dir: Option,
 }
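The args, command, and env comments above all describe the same substitution contract: $(VAR_NAME) is expanded from the container's environment, an unresolved reference is left unchanged, and $$ collapses to a single $ so that "$$(VAR_NAME)" yields the literal string "$(VAR_NAME)". A hypothetical sketch of that contract, as an illustration only and not the kubelet's implementation:

use std::collections::HashMap;

// Expands $(NAME) references against `vars`, following the documented rules.
fn expand(input: &str, vars: &HashMap<&str, &str>) -> String {
    let mut out = String::new();
    let mut chars = input.chars().peekable();
    while let Some(c) = chars.next() {
        if c != '$' {
            out.push(c);
            continue;
        }
        match chars.peek().copied() {
            // "$$" escapes to a single "$"; the following text is copied verbatim.
            Some('$') => {
                chars.next();
                out.push('$');
            }
            // "$(NAME)": substitute when NAME is known, otherwise keep the reference unchanged.
            Some('(') => {
                let rest: String = chars.clone().collect();
                match rest.find(')') {
                    Some(end) => {
                        let name = &rest[1..end];
                        for _ in 0..=end {
                            chars.next();
                        }
                        match vars.get(name) {
                            Some(value) => out.push_str(value),
                            None => {
                                out.push_str("$(");
                                out.push_str(name);
                                out.push(')');
                            }
                        }
                    }
                    None => out.push('$'),
                }
            }
            _ => out.push('$'),
        }
    }
    out
}

fn main() {
    let vars = HashMap::from([("REPLICAS", "3")]);
    assert_eq!(expand("--replicas=$(REPLICAS)", &vars), "--replicas=3");
    assert_eq!(expand("$(MISSING)", &vars), "$(MISSING)");
    assert_eq!(expand("$$(REPLICAS)", &vars), "$(REPLICAS)");
}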
@@ -2518,7 +3910,15 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainers {
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnv {
     /// Name of the environment variable. Must be a C_IDENTIFIER.
     pub name: String,
-    /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
+    /// Variable references $(VAR_NAME) are expanded
+    /// using the previously defined environment variables in the container and
+    /// any service environment variables. If a variable cannot be resolved,
+    /// the reference in the input string will be unchanged. Double $$ are reduced
+    /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+    /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+    /// Escaped references will never be expanded, regardless of whether the variable
+    /// exists or not.
+    /// Defaults to "".
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub value: Option,
     /// Source for the environment variable's value. Cannot be used if value is not empty.
@@ -2532,10 +3932,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFro
     /// Selects a key of a ConfigMap.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")]
     pub config_map_key_ref: Option,
-    /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+    /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+    /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")]
     pub field_ref: Option,
-    /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+    /// Selects a resource of the container: only resources limits and requests
+    /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")]
     pub resource_field_ref: Option,
     /// Selects a key of a secret in the pod's namespace
@@ -2548,7 +3950,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFro
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFromConfigMapKeyRef {
     /// The key to select.
     pub key: String,
-    /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    /// TODO: Add other useful fields. apiVersion, kind, uid?
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
     /// Specify whether the ConfigMap or its key must be defined
@@ -2556,7 +3960,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFro
     pub optional: Option,
 }
-/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFromFieldRef {
     /// Version of the schema the FieldPath is written in terms of, defaults to "v1".
@@ -2567,7 +3972,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFro
     pub field_path: String,
 }
-/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+/// Selects a resource of the container: only resources limits and requests
+/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFromResourceFieldRef {
     /// Container name: required for volumes, optional for env vars
@@ -2585,7 +3991,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFro
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvValueFromSecretKeyRef {
     /// The key of the secret to select from. Must be a valid secret key.
     pub key: String,
-    /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    /// TODO: Add other useful fields. apiVersion, kind, uid?
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
     /// Specify whether the Secret or its key must be defined
@@ -2610,7 +4018,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvFrom {
 /// The ConfigMap to select from
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvFromConfigMapRef {
-    /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    /// TODO: Add other useful fields. apiVersion, kind, uid?
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
     /// Specify whether the ConfigMap must be defined
@@ -2621,7 +4031,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvFromConf
 /// The Secret to select from
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvFromSecretRef {
-    /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
+    /// Name of the referent.
+    /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+    /// TODO: Add other useful fields. apiVersion, kind, uid?
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
     /// Specify whether the Secret must be defined
@@ -2629,18 +4041,33 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersEnvFromSecr
     pub optional: Option,
 }
-/// Actions that the management system should take in response to container lifecycle events. Cannot be updated.
+/// Actions that the management system should take in response to container lifecycle events.
+/// Cannot be updated.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecycle {
-    /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+    /// PostStart is called immediately after a container is created. If the handler fails,
+    /// the container is terminated and restarted according to its restart policy.
+    /// Other management of the container blocks until the hook completes.
+    /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")]
     pub post_start: Option,
-    /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+    /// PreStop is called immediately before a container is terminated due to an
+    /// API request or management event such as liveness/startup probe failure,
+    /// preemption, resource contention, etc. The handler is not called if the
+    /// container crashes or exits. The Pod's termination grace period countdown begins before the
+    /// PreStop hook is executed. Regardless of the outcome of the handler, the
+    /// container will eventually terminate within the Pod's termination grace
+    /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+    /// or until the termination grace period is reached.
+    /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")]
     pub pre_stop: Option,
 }
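The lifecycle struct above wires postStart and preStop handlers; of the handler kinds, sleep is the simplest and tcpSocket is documented as deprecated. A rough sketch of how a preStop sleep hook would serialize, using hypothetical trimmed mirror types rather than the generated ones (the elided generics are assumed to be Option<...> of the matching handler types; serde and serde_json assumed available):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
struct SleepSketch {
    seconds: i64,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
struct HandlerSketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    sleep: Option<SleepSketch>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
struct LifecycleSketch {
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")]
    pre_stop: Option<HandlerSketch>,
}

fn main() {
    let lifecycle = LifecycleSketch {
        pre_stop: Some(HandlerSketch { sleep: Some(SleepSketch { seconds: 10 }) }),
    };
    // Prints {"preStop":{"sleep":{"seconds":10}}}
    println!("{}", serde_json::to_string(&lifecycle).unwrap());
}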
#[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -2660,7 +4089,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePo /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2668,7 +4101,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePo /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2677,9 +4111,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePo /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2687,7 +4124,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePo /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2700,17 +4138,29 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePo pub seconds: i64, } -/// Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePreStop { /// Exec specifies the action to take. @@ -2722,7 +4172,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. 
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")]
     pub tcp_socket: Option,
 }
@@ -2730,7 +4182,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr
 /// Exec specifies the action to take.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePreStopExec {
-    /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+    /// Command is the command line to execute inside the container, the working directory for the
+    /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+    /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+    /// a shell, you need to explicitly call out to that shell.
+    /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub command: Option>,
 }
@@ -2738,7 +4194,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr
 /// HTTPGet specifies the http request to perform.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePreStopHttpGet {
-    /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+    /// Host name to connect to, defaults to the pod IP. You probably want to set
+    /// "Host" in httpHeaders instead.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
     /// Custom headers to set in the request. HTTP allows repeated headers.
@@ -2747,9 +4204,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr
     /// Path to access on the HTTP server.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub path: Option,
-    /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Name or number of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
-    /// Scheme to use for connecting to the host. Defaults to HTTP.
+    /// Scheme to use for connecting to the host.
+    /// Defaults to HTTP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub scheme: Option,
 }
@@ -2757,7 +4217,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr
 /// HTTPHeader describes a custom header to be used in HTTP probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeaders {
-    /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
+    /// The header field name.
+    /// This will be canonicalized upon output, so case-variant names will be understood as the same header.
     pub name: String,
     /// The header field value
     pub value: String,
@@ -2770,23 +4231,31 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePr
     pub seconds: i64,
 }
 
-/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.
+/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+/// for the backward compatibility. There are no validation of this field and
+/// lifecycle hooks will fail in runtime when tcp handler is specified.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLifecyclePreStopTcpSocket {
     /// Optional: Host name to connect to, defaults to the pod IP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
-    /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Number or name of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
 }
 
-/// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+/// Periodic probe of container liveness.
+/// Container will be restarted if the probe fails.
+/// Cannot be updated.
+/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessProbe {
     /// Exec specifies the action to take.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub exec: Option,
-    /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+    /// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+    /// Defaults to 3. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")]
     pub failure_threshold: Option,
     /// GRPC specifies an action involving a GRPC port.
@@ -2795,22 +4264,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
     /// HTTPGet specifies the http request to perform.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")]
     pub http_get: Option,
-    /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after the container has started before liveness probes are initiated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")]
     pub initial_delay_seconds: Option,
-    /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+    /// How often (in seconds) to perform the probe.
+    /// Default to 10 seconds. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")]
     pub period_seconds: Option,
-    /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+    /// Minimum consecutive successes for the probe to be considered successful after having failed.
+    /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")]
     pub success_threshold: Option,
     /// TCPSocket specifies an action involving a TCP port.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")]
     pub tcp_socket: Option,
-    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+    /// The grace period is the duration in seconds after the processes running in the pod are sent
+    /// a termination signal and the time when the processes are forcibly halted with a kill signal.
+    /// Set this value longer than the expected cleanup time for your process.
+    /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+    /// value overrides the value provided by the pod spec.
+    /// Value must be non-negative integer. The value zero indicates stop immediately via
+    /// the kill signal (no opportunity to shut down).
+    /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+    /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")]
     pub termination_grace_period_seconds: Option,
-    /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after which the probe times out.
+    /// Defaults to 1 second. Minimum value is 1.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")]
     pub timeout_seconds: Option,
 }
@@ -2818,7 +4301,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
 /// Exec specifies the action to take.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessProbeExec {
-    /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+    /// Command is the command line to execute inside the container, the working directory for the
+    /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+    /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+    /// a shell, you need to explicitly call out to that shell.
+    /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub command: Option>,
 }
@@ -2828,8 +4315,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessProbeGrpc {
     /// Port number of the gRPC service. Number must be in the range 1 to 65535.
     pub port: i32,
-    /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
-    /// If this is not specified, the default behavior is defined by gRPC.
+    /// Service is the name of the service to place in the gRPC HealthCheckRequest
+    /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+    ///
+    ///
+    /// If this is not specified, the default behavior is defined by gRPC.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub service: Option,
 }
@@ -2837,7 +4327,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
 /// HTTPGet specifies the http request to perform.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessProbeHttpGet {
-    /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
+    /// Host name to connect to, defaults to the pod IP. You probably want to set
+    /// "Host" in httpHeaders instead.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
     /// Custom headers to set in the request. HTTP allows repeated headers.
@@ -2846,9 +4337,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
     /// Path to access on the HTTP server.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub path: Option,
-    /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Name or number of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
-    /// Scheme to use for connecting to the host. Defaults to HTTP.
+    /// Scheme to use for connecting to the host.
+    /// Defaults to HTTP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub scheme: Option,
 }
@@ -2856,7 +4350,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
 /// HTTPHeader describes a custom header to be used in HTTP probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessProbeHttpGetHttpHeaders {
-    /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.
+    /// The header field name.
+    /// This will be canonicalized upon output, so case-variant names will be understood as the same header.
     pub name: String,
     /// The header field value
     pub value: String,
@@ -2868,37 +4363,50 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersLivenessPro
     /// Optional: Host name to connect to, defaults to the pod IP.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub host: Option,
-    /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+    /// Number or name of the port to access on the container.
+    /// Number must be in the range 1 to 65535.
+    /// Name must be an IANA_SVC_NAME.
     pub port: IntOrString,
 }
 
 /// ContainerPort represents a network port in a single container.
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersPorts {
-    /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
+    /// Number of port to expose on the pod's IP address.
+    /// This must be a valid port number, 0 < x < 65536.
     #[serde(rename = "containerPort")]
     pub container_port: i32,
     /// What host IP to bind the external port to.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")]
     pub host_ip: Option,
-    /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
+    /// Number of port to expose on the host.
+    /// If specified, this must be a valid port number, 0 < x < 65536.
+    /// If HostNetwork is specified, this must match ContainerPort.
+    /// Most containers do not need this.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")]
     pub host_port: Option,
-    /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
+    /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+    /// named port in a pod must have a unique name. Name for the port that can be
+    /// referred to by services.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option,
-    /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
+    /// Protocol for port. Must be UDP, TCP, or SCTP.
+    /// Defaults to "TCP".
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub protocol: Option,
 }
 
-/// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+/// Periodic probe of container service readiness.
+/// Container will be removed from service endpoints if the probe fails.
+/// Cannot be updated.
+/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessProbe {
     /// Exec specifies the action to take.
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub exec: Option,
-    /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+    /// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+    /// Defaults to 3. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")]
     pub failure_threshold: Option,
     /// GRPC specifies an action involving a GRPC port.
@@ -2907,22 +4415,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr
     /// HTTPGet specifies the http request to perform.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")]
     pub http_get: Option,
-    /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after the container has started before liveness probes are initiated.
+    /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")]
     pub initial_delay_seconds: Option,
-    /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
+    /// How often (in seconds) to perform the probe.
+    /// Default to 10 seconds. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")]
     pub period_seconds: Option,
-    /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+    /// Minimum consecutive successes for the probe to be considered successful after having failed.
+    /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")]
     pub success_threshold: Option,
     /// TCPSocket specifies an action involving a TCP port.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")]
     pub tcp_socket: Option,
-    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+    /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+    /// The grace period is the duration in seconds after the processes running in the pod are sent
+    /// a termination signal and the time when the processes are forcibly halted with a kill signal.
+    /// Set this value longer than the expected cleanup time for your process.
+    /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+    /// value overrides the value provided by the pod spec.
+    /// Value must be non-negative integer. The value zero indicates stop immediately via
+    /// the kill signal (no opportunity to shut down).
+    /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+    /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")]
     pub termination_grace_period_seconds: Option,
-    /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+    /// Number of seconds after which the probe times out.
+    /// Defaults to 1 second. Minimum value is 1.
+ /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -2930,7 +4452,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2940,8 +4466,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -2949,7 +4478,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2958,9 +4488,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. 
+ /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2968,7 +4501,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2980,33 +4514,49 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersReadinessPr /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerResizePolicy represents resource resize policy for the container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersResizePolicy { - /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + /// Name of the resource to which this resource resize policy applies. + /// Supported values: cpu, memory. #[serde(rename = "resourceName")] pub resource_name: String, - /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + /// Restart policy to apply when specified resource is resized. + /// If not specified, it defaults to NotRequired. #[serde(rename = "restartPolicy")] pub restart_policy: String, } -/// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +/// Compute Resources required by this container. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. + /// Claims lists the names of resources, defined in spec.resourceClaims, + /// that are used by this container. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. It can only be set for containers. #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option>, - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. 
+ /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -3014,49 +4564,95 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersResources { /// ResourceClaim references one entry in PodSpec.ResourceClaims. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + /// Name must match the name of one entry in pod.spec.resourceClaims of + /// the Pod where this field is used. It makes that resource available + /// inside a container. pub name: String, } -/// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +/// SecurityContext defines the security options the container should be run with. +/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. +/// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityContext { - /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + /// AllowPrivilegeEscalation controls whether a process can gain more + /// privileges than its parent process. This bool directly controls if + /// the no_new_privs flag will be set on the container process. + /// AllowPrivilegeEscalation is true always when the container is: + /// 1) run as Privileged + /// 2) has CAP_SYS_ADMIN + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] pub allow_privilege_escalation: Option, - /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + /// The capabilities to add/drop when running containers. 
+ /// Defaults to the default set of capabilities granted by the container runtime. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub capabilities: Option, - /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + /// Run container in privileged mode. + /// Processes in privileged containers are essentially equivalent to root on the host. + /// Defaults to false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, - /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + /// procMount denotes the type of proc mount to use for the containers. + /// The default is DefaultProcMount which uses the container runtime defaults for + /// readonly paths and masked paths. + /// This requires the ProcMountType feature flag to be enabled. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] pub proc_mount: Option, - /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + /// Whether this container has a read-only root filesystem. + /// Default is false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] pub read_only_root_filesystem: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in PodSecurityContext. 
If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to the container. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by this container. If seccomp options are + /// provided at both the pod & container level, the container options + /// override the pod options. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options from the PodSecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. +/// The capabilities to add/drop when running containers. 
+/// Defaults to the default set of capabilities granted by the container runtime. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityContextCapabilities { /// Added capabilities @@ -3067,7 +4663,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityCon pub drop: Option>, } -/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to the container. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in PodSecurityContext. If set in both SecurityContext and +/// PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -3084,42 +4684,71 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityCon pub user: Option, } -/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by this container. If seccomp options are +/// provided at both the pod & container level, the container options +/// override the pod options. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. 
#[serde(rename = "type")] pub r#type: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options from the PodSecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// StartupProbe indicates that the Pod has successfully initialized. +/// If specified, no other probes are executed until this completes successfully. +/// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. +/// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, +/// when it might take a long time to load data or warm a cache, than during steady-state operation. +/// This cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -3128,22 +4757,36 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. 
Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -3151,7 +4794,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -3161,8 +4808,11 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. 
+ /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -3170,7 +4820,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -3179,9 +4830,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -3189,7 +4843,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -3201,7 +4856,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersStartupProb /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -3218,31 +4875,70 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersVolumeDevic /// VolumeMount describes a mounting of a Volume within a container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecInitContainersVolumeMounts { - /// Path within the container at which the volume should be mounted. Must not contain ':'. + /// Path within the container at which the volume should be mounted. Must + /// not contain ':'. 
#[serde(rename = "mountPath")] pub mount_path: String, - /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + /// mountPropagation determines how mounts are propagated from the host + /// to container and the other way around. + /// When not set, MountPropagationNone is used. + /// This field is beta in 1.10. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] pub mount_propagation: Option, /// This must match the Name of a Volume. pub name: String, - /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + /// Mounted read-only if true, read-write otherwise (false or unspecified). + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + /// Path within the volume from which the container's volume should be mounted. + /// Defaults to "" (volume's root). #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] pub sub_path: Option, - /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + /// Expanded path within the volume from which the container's volume should be mounted. + /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + /// Defaults to "" (volume's root). + /// SubPathExpr and SubPath are mutually exclusive. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] pub sub_path_expr: Option, } -/// Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. -/// If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions -/// If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup +/// Specifies the OS of the containers in the pod. +/// Some pod and container fields are restricted if this is set. 
+/// +/// +/// If the OS field is set to linux, the following fields must be unset: +/// -securityContext.windowsOptions +/// +/// +/// If the OS field is set to windows, following fields must be unset: +/// - spec.hostPID +/// - spec.hostIPC +/// - spec.hostUsers +/// - spec.securityContext.seLinuxOptions +/// - spec.securityContext.seccompProfile +/// - spec.securityContext.fsGroup +/// - spec.securityContext.fsGroupChangePolicy +/// - spec.securityContext.sysctls +/// - spec.shareProcessNamespace +/// - spec.securityContext.runAsUser +/// - spec.securityContext.runAsGroup +/// - spec.securityContext.supplementalGroups +/// - spec.containers[*].securityContext.seLinuxOptions +/// - spec.containers[*].securityContext.seccompProfile +/// - spec.containers[*].securityContext.capabilities +/// - spec.containers[*].securityContext.readOnlyRootFilesystem +/// - spec.containers[*].securityContext.privileged +/// - spec.containers[*].securityContext.allowPrivilegeEscalation +/// - spec.containers[*].securityContext.procMount +/// - spec.containers[*].securityContext.runAsUser +/// - spec.containers[*].securityContext.runAsGroup #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecOs { - /// Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null + /// Name is the name of the operating system. The currently supported values are linux and windows. + /// Additional value may be defined in future and can be one of: + /// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + /// Clients should expect to handle additional values and treat unrecognized values in this field as os: null pub name: String, } @@ -3254,10 +4950,13 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecReadinessGates { pub condition_type: String, } -/// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. +/// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +/// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +/// Containers that need access to the ResourceClaim reference it with this name. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecResourceClaims { - /// Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + /// Name uniquely identifies this resource claim inside the pod. + /// This must be a DNS_LABEL. pub name: String, /// Source describes where to find the ResourceClaim. #[serde(default, skip_serializing_if = "Option::is_none")] @@ -3267,12 +4966,24 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecResourceClaims { /// Source describes where to find the ResourceClaim. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecResourceClaimsSource { - /// ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. 
+ /// ResourceClaimName is the name of a ResourceClaim object in the same + /// namespace as this pod. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimName")] pub resource_claim_name: Option, - /// ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. - /// The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - /// This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. + /// ResourceClaimTemplateName is the name of a ResourceClaimTemplate + /// object in the same namespace as this pod. + /// + /// + /// The template will be used to create a new ResourceClaim, which will + /// be bound to this pod. When this pod is deleted, the ResourceClaim + /// will also be deleted. The pod name and resource name, along with a + /// generated component, will be used to form a unique name for the + /// ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + /// + /// + /// This field is immutable and no changes will be made to the + /// corresponding ResourceClaim by the control plane after creating the + /// ResourceClaim. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimTemplateName")] pub resource_claim_template_name: Option, } @@ -3280,48 +4991,102 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecResourceClaimsSource { /// PodSchedulingGate is associated to a Pod to guard its scheduling. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSchedulingGates { - /// Name of the scheduling gate. Each scheduling gate must have a unique name field. + /// Name of the scheduling gate. + /// Each scheduling gate must have a unique name field. pub name: String, } -/// SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. +/// SecurityContext holds pod-level security attributes and common container settings. +/// Optional: Defaults to empty. See type description for default values of each field. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContext { - /// A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: - /// 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- - /// If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. + /// A special supplemental group that applies to all containers in a pod. + /// Some volume types allow the Kubelet to change the ownership of that volume + /// to be owned by the pod: + /// + /// + /// 1. The owning GID will be the FSGroup + /// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + /// 3. 
The permission bits are OR'd with rw-rw---- + /// + /// + /// If unset, the Kubelet will not modify the ownership and permissions of any volume. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroup")] pub fs_group: Option, - /// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows. + /// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + /// before being exposed inside Pod. This field will only apply to + /// volume types which support fsGroup based ownership(and permissions). + /// It will have no effect on ephemeral volume types such as: secret, configmaps + /// and emptydir. + /// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroupChangePolicy")] pub fs_group_change_policy: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence + /// for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. 
+ /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence + /// for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to all containers. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in SecurityContext. If set in + /// both SecurityContext and PodSecurityContext, the value specified in SecurityContext + /// takes precedence for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by the containers in this pod. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. + /// A list of groups applied to the first process run in each container, in addition + /// to the container's primary GID, the fsGroup (if specified), and group memberships + /// defined in the container image for the uid of the container process. If unspecified, + /// no additional groups are added to any container. Note that group memberships + /// defined in the container image for the uid of the container process are still effective, + /// even if they are not included in this list. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "supplementalGroups")] pub supplemental_groups: Option>, - /// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + /// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + /// sysctls (by the container runtime) might fail to launch. + /// Note that this field cannot be set when spec.os.name is windows. 
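// Editorial note, not part of the generated patch: a hand-written sketch showing
// how the pod-level SecurityContext documented above round-trips through serde;
// the camelCase keys map onto the #[serde(rename = ...)] attributes. The helper
// function and serde_json usage are assumptions for illustration, only the
// struct name comes from this file.
//
// fn example_pod_security_context() -> serde_json::Result<
//     JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContext,
// > {
//     serde_json::from_value(serde_json::json!({
//         "runAsNonRoot": true,
//         "runAsUser": 1000,
//         "fsGroup": 2000,
//         "seccompProfile": { "type": "RuntimeDefault" }
//     }))
// }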
#[serde(default, skip_serializing_if = "Option::is_none")] pub sysctls: Option>, - /// The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options within a container's SecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to all containers. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in SecurityContext. If set in +/// both SecurityContext and PodSecurityContext, the value specified in SecurityContext +/// takes precedence for that container. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -3338,14 +5103,23 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContextSeLinuxOpt pub user: Option, } -/// The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by the containers in this pod. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. 
+ /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } @@ -3359,39 +5133,60 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContextSysctls { pub value: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options within a container's SecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +/// The pod this Toleration is attached to tolerates any taint that matches +/// the triple using the matching operator . #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTolerations { - /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + /// Effect indicates the taint effect to match. Empty means match all taint effects. + /// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. #[serde(default, skip_serializing_if = "Option::is_none")] pub effect: Option, - /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. + /// If the key is empty, operator must be Exists; this combination means to match all values and all keys. #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option, - /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + /// Operator represents a key's relationship to the value. + /// Valid operators are Exists and Equal. Defaults to Equal. + /// Exists is equivalent to wildcard for value, so that a pod can + /// tolerate all taints of a particular category. #[serde(default, skip_serializing_if = "Option::is_none")] pub operator: Option, - /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + /// TolerationSeconds represents the period of time the toleration (which must be + /// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + /// it is not set, which means tolerate the taint forever (do not evict). Zero and + /// negative values will be treated as 0 (evict immediately) by the system. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] pub toleration_seconds: Option, - /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + /// Value is the taint value the toleration matches to. + /// If the operator is Exists, the value should be empty, otherwise just a regular string. #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, } @@ -3399,56 +5194,151 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTolerations { /// TopologySpreadConstraint specifies how to spread matching pods among the given topology. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTopologySpreadConstraints { - /// LabelSelector is used to find matching pods. 
Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + /// LabelSelector is used to find matching pods. + /// Pods that match this label selector are counted to determine the number of pods + /// in their corresponding topology domain. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. - /// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + /// MatchLabelKeys is a set of pod label keys to select the pods over which + /// spreading will be calculated. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are ANDed with labelSelector + /// to select the group of existing pods over which spreading will be calculated + /// for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// MatchLabelKeys cannot be set when LabelSelector isn't set. + /// Keys that don't exist in the incoming pod labels will + /// be ignored. A null or empty list means only match against labelSelector. + /// + /// + /// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. + /// MaxSkew describes the degree to which pods may be unevenly distributed. + /// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + /// between the number of matching pods in the target topology and the global minimum. + /// The global minimum is the minimum number of matching pods in an eligible domain + /// or zero if the number of eligible domains is less than MinDomains. 
+ /// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + /// labelSelector spread as 2/2/1: + /// In this case, the global minimum is 1. + /// | zone1 | zone2 | zone3 | + /// | P P | P P | P | + /// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + /// scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + /// violate MaxSkew(1). + /// - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + /// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + /// to topologies that satisfy it. + /// It's a required field. Default value is 1 and 0 is not allowed. #[serde(rename = "maxSkew")] pub max_skew: i32, - /// MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - /// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - /// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + /// MinDomains indicates a minimum number of eligible domains. + /// When the number of eligible domains with matching topology keys is less than minDomains, + /// Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + /// And when the number of eligible domains with matching topology keys equals or greater than minDomains, + /// this value has no effect on scheduling. + /// As a result, when the number of eligible domains is less than minDomains, + /// scheduler won't schedule more than maxSkew Pods to those domains. + /// If value is nil, the constraint behaves as if MinDomains is equal to 1. + /// Valid values are integers greater than 0. + /// When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + /// + /// + /// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + /// labelSelector spread as 2/2/2: + /// | zone1 | zone2 | zone3 | + /// | P P | P P | P P | + /// The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + /// In this situation, new pod with the same labelSelector cannot be scheduled, + /// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + /// it will violate MaxSkew. + /// + /// + /// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
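// Editorial note, not part of the generated patch: the maxSkew/minDomains rules
// described above boil down to a small object per constraint. A hand-written
// sketch, assuming the same serde_json style as the notes earlier in this file;
// only maxSkew, topologyKey and whenUnsatisfiable are required fields of the
// generated struct.
//
// fn example_spread_constraint() -> serde_json::Result<
//     JobSetReplicatedJobsTemplateSpecTemplateSpecTopologySpreadConstraints,
// > {
//     serde_json::from_value(serde_json::json!({
//         "maxSkew": 1,
//         "topologyKey": "topology.kubernetes.io/zone",
//         "whenUnsatisfiable": "DoNotSchedule",
//         "labelSelector": { "matchLabels": { "app": "trainer" } }
//     }))
// }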
#[serde(default, skip_serializing_if = "Option::is_none", rename = "minDomains")] pub min_domains: Option, - /// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - /// If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + /// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + /// when calculating pod topology spread skew. Options are: + /// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + /// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + /// + /// + /// If this value is nil, the behavior is equivalent to the Honor policy. + /// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinityPolicy")] pub node_affinity_policy: Option, - /// NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - /// If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + /// NodeTaintsPolicy indicates how we will treat node taints when calculating + /// pod topology spread skew. Options are: + /// - Honor: nodes without taints, along with tainted nodes for which the incoming pod + /// has a toleration, are included. + /// - Ignore: node taints are ignored. All nodes are included. + /// + /// + /// If this value is nil, the behavior is equivalent to the Ignore policy. + /// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeTaintsPolicy")] pub node_taints_policy: Option, - /// TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + /// TopologyKey is the key of node labels. Nodes that have a label with this key + /// and identical values are considered to be in the same topology. + /// We consider each as a "bucket", and try to put balanced number + /// of pods into each bucket. + /// We define a domain as a particular instance of a topology. + /// Also, we define an eligible domain as a domain whose nodes meet the requirements of + /// nodeAffinityPolicy and nodeTaintsPolicy. + /// e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + /// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + /// It's a required field. #[serde(rename = "topologyKey")] pub topology_key: String, - /// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. + /// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + /// the spread constraint. + /// - DoNotSchedule (default) tells the scheduler not to schedule it. + /// - ScheduleAnyway tells the scheduler to schedule the pod in any location, + /// but giving higher precedence to topologies that would help reduce the + /// skew. + /// A constraint is considered "Unsatisfiable" for an incoming pod + /// if and only if every possible node assignment for that pod would violate + /// "MaxSkew" on some topology. + /// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + /// labelSelector spread as 3/1/1: + /// | zone1 | zone2 | zone3 | + /// | P P P | P | P | + /// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + /// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + /// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + /// won't make it *more* imbalanced. + /// It's a required field. #[serde(rename = "whenUnsatisfiable")] pub when_unsatisfiable: String, } -/// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. +/// LabelSelector is used to find matching pods. +/// Pods that match this label selector are counted to determine the number of pods +/// in their corresponding topology domain. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTopologySpreadConstraintsLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". 
The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTopologySpreadConstraintsLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -3456,7 +5346,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecTopologySpreadConstraints /// Volume represents a named volume in a pod that may be accessed by any container in the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumes { - /// awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// awsElasticBlockStore represents an AWS Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(default, skip_serializing_if = "Option::is_none", rename = "awsElasticBlockStore")] pub aws_elastic_block_store: Option, /// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. @@ -3468,7 +5360,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumes { /// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime #[serde(default, skip_serializing_if = "Option::is_none")] pub cephfs: Option, - /// cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// cinder represents a cinder volume attached and mounted on kubelets host machine. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub cinder: Option, /// configMap represents a configMap that should populate this volume @@ -3480,46 +5373,91 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumes { /// downwardAPI represents downward API about the pod that should populate this volume #[serde(default, skip_serializing_if = "Option::is_none", rename = "downwardAPI")] pub downward_api: Option, - /// emptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// emptyDir represents a temporary directory that shares a pod's lifetime. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none", rename = "emptyDir")] pub empty_dir: Option, - /// ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. - /// Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). - /// Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. - /// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. - /// A pod can use both types of ephemeral volumes and persistent volumes at the same time. + /// ephemeral represents a volume that is handled by a cluster storage driver. + /// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + /// and deleted when the pod is removed. + /// + /// + /// Use this if: + /// a) the volume is only needed while the pod runs, + /// b) features of normal volumes like restoring from snapshot or capacity + /// tracking are needed, + /// c) the storage driver is specified through a storage class, and + /// d) the storage driver supports dynamic volume provisioning through + /// a PersistentVolumeClaim (see EphemeralVolumeSource for more + /// information on the connection between this volume type + /// and PersistentVolumeClaim). + /// + /// + /// Use PersistentVolumeClaim or one of the vendor-specific + /// APIs for volumes that persist for longer than the lifecycle + /// of an individual pod. + /// + /// + /// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + /// be used that way - see the documentation of the driver for + /// more information. + /// + /// + /// A pod can use both types of ephemeral volumes and + /// persistent volumes at the same time. #[serde(default, skip_serializing_if = "Option::is_none")] pub ephemeral: Option, /// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. #[serde(default, skip_serializing_if = "Option::is_none")] pub fc: Option, - /// flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + /// flexVolume represents a generic volume resource that is + /// provisioned/attached using an exec based plugin. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flexVolume")] pub flex_volume: Option, /// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running #[serde(default, skip_serializing_if = "Option::is_none")] pub flocker: Option, - /// gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// gcePersistentDisk represents a GCE Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none", rename = "gcePersistentDisk")] pub gce_persistent_disk: Option, - /// gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. + /// gitRepo represents a git repository at a particular revision. + /// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + /// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + /// into the Pod's container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gitRepo")] pub git_repo: Option, - /// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md + /// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub glusterfs: Option, - /// hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write. + /// hostPath represents a pre-existing file or directory on the host + /// machine that is directly exposed to the container. This is generally + /// used for system agents or other privileged things that are allowed + /// to see the host machine. Most containers will NOT need this. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// --- + /// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + /// mount host directories as read/write. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPath")] pub host_path: Option, - /// iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md + /// iscsi represents an ISCSI Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://examples.k8s.io/volumes/iscsi/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub iscsi: Option, - /// name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// name of the volume. + /// Must be a DNS_LABEL and unique within the pod. 
+ /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names pub name: String, - /// nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// nfs represents an NFS mount on the host that shares a pod's lifetime + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[serde(default, skip_serializing_if = "Option::is_none")] pub nfs: Option, - /// persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + /// persistentVolumeClaimVolumeSource represents a reference to a + /// PersistentVolumeClaim in the same namespace. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[serde(default, skip_serializing_if = "Option::is_none", rename = "persistentVolumeClaim")] pub persistent_volume_claim: Option, /// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine @@ -3534,13 +5472,15 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumes { /// quobyte represents a Quobyte mount on the host that shares a pod's lifetime #[serde(default, skip_serializing_if = "Option::is_none")] pub quobyte: Option, - /// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md + /// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + /// More info: https://examples.k8s.io/volumes/rbd/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub rbd: Option, /// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "scaleIO")] pub scale_io: Option, - /// secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + /// secret represents a secret that should populate this volume. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[serde(default, skip_serializing_if = "Option::is_none")] pub secret: Option, /// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. @@ -3551,19 +5491,30 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumes { pub vsphere_volume: Option, } -/// awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore +/// awsElasticBlockStore represents an AWS Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesAwsElasticBlockStore { - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + /// partition is the partition in the volume that you want to mount. + /// If omitted, the default is to mount by volume name. + /// Examples: For volume /dev/sda1, you specify the partition as "1". + /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). #[serde(default, skip_serializing_if = "Option::is_none")] pub partition: Option, - /// readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// readOnly value true will force the readOnly setting in VolumeMounts. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(rename = "volumeID")] pub volume_id: String, } @@ -3580,13 +5531,16 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesAzureDisk { /// diskURI is the URI of data disk in the blob storage #[serde(rename = "diskURI")] pub disk_uri: String, - /// fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is Filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option, - /// readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } @@ -3594,7 +5548,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesAzureDisk { /// azureFile represents an Azure File Service mount on the host and bind mount to the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesAzureFile { - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// secretName is the name of secret that contains Azure Storage Account Name and Key @@ -3608,54 +5563,74 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesAzureFile { /// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCephfs { - /// monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// monitors is Required: Monitors is a collection of Ceph monitors + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it pub monitors: Vec, /// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretFile")] pub secret_file: Option, - /// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. 
+ /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// user is optional: User is the rados user name, default is admin + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, } -/// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it +/// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. +/// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCephfsSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md +/// cinder represents a cinder volume attached and mounted on kubelets host machine. +/// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCinder { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is optional: points to a secret object containing parameters used to connect to OpenStack. + /// secretRef is optional: points to a secret object containing parameters used to connect + /// to OpenStack. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// volumeID used to identify the volume in cinder. 
+ /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(rename = "volumeID")] pub volume_id: String, } -/// secretRef is optional: points to a secret object containing parameters used to connect to OpenStack. +/// secretRef is optional: points to a secret object containing parameters used to connect +/// to OpenStack. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCinderSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3663,13 +5638,27 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCinderSecretRef { /// configMap represents a configMap that should populate this volume #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesConfigMap { - /// defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode is optional: mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Defaults to 0644. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, - /// items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// ConfigMap will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the ConfigMap, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional specify whether the ConfigMap or its keys must be defined @@ -3682,36 +5671,59 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesConfigMap { pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesConfigMapItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCsi { - /// driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + /// driver is the name of the CSI driver that handles this volume. + /// Consult with your admin for the correct name as registered in the cluster. pub driver: String, - /// fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + /// fsType to mount. Ex. "ext4", "xfs", "ntfs". + /// If not provided, the empty value is passed to the associated CSI driver + /// which will determine the default filesystem to apply. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. 
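To make the mode-bit convention above concrete: octal 0000-0777 and decimal 0-511 cover the same range, so 0644 becomes 420 in a JSON manifest. A hedged sketch of a configMap volume source as it would serialize (serde_json assumed, names illustrative):

use serde_json::json;

fn main() {
    // 0o644 (rw-r--r--) is the 420 that JSON requires for defaultMode/mode.
    assert_eq!(0o644, 420);
    let config_map_volume = json!({
        "name": "app-config",            // illustrative ConfigMap name
        "defaultMode": 420,              // 0644
        "items": [
            // project one key to a relative path with read-only 0444 (= 292) permissions
            { "key": "settings.yaml", "path": "config/settings.yaml", "mode": 292 }
        ],
        "optional": false
    });
    println!("{config_map_volume}");
}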
+ /// nodePublishSecretRef is a reference to the secret object containing + /// sensitive information to pass to the CSI driver to complete the CSI + /// NodePublishVolume and NodeUnpublishVolume calls. + /// This field is optional, and may be empty if no secret is required. If the + /// secret object contains more than one secret, all secret references are passed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePublishSecretRef")] pub node_publish_secret_ref: Option, - /// readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + /// readOnly specifies a read-only configuration for the volume. + /// Defaults to false (read/write). #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + /// volumeAttributes stores driver-specific properties that are passed to the CSI + /// driver. Consult your driver's documentation for supported values. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeAttributes")] pub volume_attributes: Option>, } -/// nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. +/// nodePublishSecretRef is a reference to the secret object containing +/// sensitive information to pass to the CSI driver to complete the CSI +/// NodePublishVolume and NodeUnpublishVolume calls. +/// This field is optional, and may be empty if no secret is required. If the +/// secret object contains more than one secret, all secret references are passed. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCsiNodePublishSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3719,7 +5731,14 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesCsiNodePublishSecr /// downwardAPI represents downward API about the pod that should populate this volume #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesDownwardApi { - /// Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits to use on created files by default. Must be a + /// Optional: mode bits used to set permissions on created files by default. 
+ /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Defaults to 0644. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, /// Items is a list of downward API volume file @@ -3733,12 +5752,18 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesDownwardApiItems { /// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits used to set permissions on this file, must be an octal value + /// between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, /// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' pub path: String, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, } @@ -3754,7 +5779,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesDownwardApiItemsFi pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesDownwardApiItemsResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -3767,46 +5793,125 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesDownwardApiItemsRe pub resource: String, } -/// emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir +/// emptyDir represents a temporary directory that shares a pod's lifetime. 
+/// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEmptyDir { - /// medium represents what type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// medium represents what type of storage medium should back this directory. + /// The default is "" which means to use the node's default medium. + /// Must be an empty string (default) or Memory. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none")] pub medium: Option, - /// sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// sizeLimit is the total amount of local storage required for this EmptyDir volume. + /// The size limit is also applicable for memory medium. + /// The maximum usage on memory medium EmptyDir would be the minimum value between + /// the SizeLimit specified here and the sum of memory limits of all containers in a pod. + /// The default is nil which means that the limit is undefined. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none", rename = "sizeLimit")] pub size_limit: Option, } -/// ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. -/// Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). -/// Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. -/// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. -/// A pod can use both types of ephemeral volumes and persistent volumes at the same time. +/// ephemeral represents a volume that is handled by a cluster storage driver. +/// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, +/// and deleted when the pod is removed. 
+/// +/// +/// Use this if: +/// a) the volume is only needed while the pod runs, +/// b) features of normal volumes like restoring from snapshot or capacity +/// tracking are needed, +/// c) the storage driver is specified through a storage class, and +/// d) the storage driver supports dynamic volume provisioning through +/// a PersistentVolumeClaim (see EphemeralVolumeSource for more +/// information on the connection between this volume type +/// and PersistentVolumeClaim). +/// +/// +/// Use PersistentVolumeClaim or one of the vendor-specific +/// APIs for volumes that persist for longer than the lifecycle +/// of an individual pod. +/// +/// +/// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to +/// be used that way - see the documentation of the driver for +/// more information. +/// +/// +/// A pod can use both types of ephemeral volumes and +/// persistent volumes at the same time. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeral { - /// Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - /// An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - /// This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - /// Required, must not be nil. + /// Will be used to create a stand-alone PVC to provision the volume. + /// The pod in which this EphemeralVolumeSource is embedded will be the + /// owner of the PVC, i.e. the PVC will be deleted together with the + /// pod. The name of the PVC will be `-` where + /// `` is the name from the `PodSpec.Volumes` array + /// entry. Pod validation will reject the pod if the concatenated name + /// is not valid for a PVC (for example, too long). + /// + /// + /// An existing PVC with that name that is not owned by the pod + /// will *not* be used for the pod to avoid using an unrelated + /// volume by mistake. Starting the pod is then blocked until + /// the unrelated PVC is removed. If such a pre-created PVC is + /// meant to be used by the pod, the PVC has to updated with an + /// owner reference to the pod once the pod exists. Normally + /// this should not be necessary, but it may be useful when + /// manually reconstructing a broken cluster. + /// + /// + /// This field is read-only and no changes will be made by Kubernetes + /// to the PVC after it has been created. + /// + /// + /// Required, must not be nil. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeClaimTemplate")] pub volume_claim_template: Option, } -/// Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. 
The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). -/// An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. -/// This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. -/// Required, must not be nil. +/// Will be used to create a stand-alone PVC to provision the volume. +/// The pod in which this EphemeralVolumeSource is embedded will be the +/// owner of the PVC, i.e. the PVC will be deleted together with the +/// pod. The name of the PVC will be `<pod name>-<volume name>` where +/// `<volume name>` is the name from the `PodSpec.Volumes` array +/// entry. Pod validation will reject the pod if the concatenated name +/// is not valid for a PVC (for example, too long). +/// +/// +/// An existing PVC with that name that is not owned by the pod +/// will *not* be used for the pod to avoid using an unrelated +/// volume by mistake. Starting the pod is then blocked until +/// the unrelated PVC is removed. If such a pre-created PVC is +/// meant to be used by the pod, the PVC has to updated with an +/// owner reference to the pod once the pod exists. Normally +/// this should not be necessary, but it may be useful when +/// manually reconstructing a broken cluster. +/// +/// +/// This field is read-only and no changes will be made by Kubernetes +/// to the PVC after it has been created. +/// +/// +/// Required, must not be nil. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplate { - /// May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + /// May contain labels and annotations that will be copied into the PVC + /// when creating it. No other fields are allowed and will be rejected during + /// validation. #[serde(default, skip_serializing_if = "Option::is_none")] pub metadata: Option, - /// The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + /// The specification for the PersistentVolumeClaim. The entire content is + /// copied unchanged into the PVC that gets created from this + /// template. The same fields as in a PersistentVolumeClaim + /// are also valid here. pub spec: JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpec, } -/// May contain labels and annotations that will be copied into the PVC +/// when creating it. No other fields are allowed and will be rejected during +/// validation.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -3821,31 +5926,81 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeCla pub namespace: Option, } -/// The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. +/// The specification for the PersistentVolumeClaim. The entire content is +/// copied unchanged into the PVC that gets created from this +/// template. The same fields as in a PersistentVolumeClaim +/// are also valid here. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpec { - /// accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + /// accessModes contains the desired access modes the volume should have. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessModes")] pub access_modes: Option>, - /// dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + /// dataSource field can be used to specify either: + /// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + /// * An existing PVC (PersistentVolumeClaim) + /// If the provisioner or an external controller can support the specified data source, + /// it will create a new volume based on the contents of the specified data source. + /// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + /// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + /// If the namespace is specified, then dataSourceRef will not be copied to dataSource. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSource")] pub data_source: Option, - /// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. 
When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + /// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + /// volume is desired. This may be any object from a non-empty API group (non + /// core object) or a PersistentVolumeClaim object. + /// When this field is specified, volume binding will only succeed if the type of + /// the specified object matches some installed volume populator or dynamic + /// provisioner. + /// This field will replace the functionality of the dataSource field and as such + /// if both fields are non-empty, they must have the same value. For backwards + /// compatibility, when namespace isn't specified in dataSourceRef, + /// both fields (dataSource and dataSourceRef) will be set to the same + /// value automatically if one of them is empty and the other is non-empty. + /// When namespace is specified in dataSourceRef, + /// dataSource isn't set to the same value and must be empty. + /// There are three important differences between dataSource and dataSourceRef: + /// * While dataSource only allows two specific types of objects, dataSourceRef + /// allows any non-core object, as well as PersistentVolumeClaim objects. + /// * While dataSource ignores disallowed values (dropping them), dataSourceRef + /// preserves all values, and generates an error if a disallowed value is + /// specified. + /// * While dataSource only allows local objects, dataSourceRef allows objects + /// in any namespaces. + /// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + /// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSourceRef")] pub data_source_ref: Option, - /// resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + /// resources represents the minimum resources the volume should have. + /// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + /// that are lower than previous value but must still be higher than capacity recorded in the + /// status field of the claim. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, /// selector is a label query over volumes to consider for binding. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub selector: Option, - /// storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + /// storageClassName is the name of the StorageClass required by the claim. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassName")] pub storage_class_name: Option, - /// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + /// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + /// If specified, the CSI driver will create or update the volume with the attributes defined + /// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + /// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + /// will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + /// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + /// will be set by the persistentvolume controller if it exists. + /// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + /// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + /// exists. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + /// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeAttributesClassName")] pub volume_attributes_class_name: Option, - /// volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + /// volumeMode defines what type of volume is required by the claim. + /// Value of Filesystem is implied when not included in claim spec. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMode")] pub volume_mode: Option, /// volumeName is the binding reference to the PersistentVolume backing this claim. 
@@ -3853,10 +6008,19 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeCla pub volume_name: Option, } -/// dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. +/// dataSource field can be used to specify either: +/// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) +/// * An existing PVC (PersistentVolumeClaim) +/// If the provisioner or an external controller can support the specified data source, +/// it will create a new volume based on the contents of the specified data source. +/// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, +/// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. +/// If the namespace is specified, then dataSourceRef will not be copied to dataSource. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecDataSource { - /// APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + /// APIGroup is the group for the resource being referenced. + /// If APIGroup is not specified, the specified Kind must be in the core API group. + /// For any other third-party types, APIGroup is required. #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGroup")] pub api_group: Option, /// Kind is the type of resource being referenced @@ -3865,28 +6029,62 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeCla pub name: String, } -/// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. 
* While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. +/// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty +/// volume is desired. This may be any object from a non-empty API group (non +/// core object) or a PersistentVolumeClaim object. +/// When this field is specified, volume binding will only succeed if the type of +/// the specified object matches some installed volume populator or dynamic +/// provisioner. +/// This field will replace the functionality of the dataSource field and as such +/// if both fields are non-empty, they must have the same value. For backwards +/// compatibility, when namespace isn't specified in dataSourceRef, +/// both fields (dataSource and dataSourceRef) will be set to the same +/// value automatically if one of them is empty and the other is non-empty. +/// When namespace is specified in dataSourceRef, +/// dataSource isn't set to the same value and must be empty. +/// There are three important differences between dataSource and dataSourceRef: +/// * While dataSource only allows two specific types of objects, dataSourceRef +/// allows any non-core object, as well as PersistentVolumeClaim objects. +/// * While dataSource ignores disallowed values (dropping them), dataSourceRef +/// preserves all values, and generates an error if a disallowed value is +/// specified. +/// * While dataSource only allows local objects, dataSourceRef allows objects +/// in any namespaces. +/// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +/// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecDataSourceRef { - /// APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + /// APIGroup is the group for the resource being referenced. + /// If APIGroup is not specified, the specified Kind must be in the core API group. + /// For any other third-party types, APIGroup is required. #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGroup")] pub api_group: Option, /// Kind is the type of resource being referenced pub kind: String, /// Name is the name of resource being referenced pub name: String, - /// Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + /// Namespace is the namespace of resource being referenced + /// Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + /// (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
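As a concrete illustration of the dataSource/dataSourceRef distinction documented above, here is a hedged sketch (serde_json only, names illustrative) of a claim spec that populates the new volume from an existing VolumeSnapshot; adding a namespace key here would be the cross-namespace case gated by CrossNamespaceVolumeDataSource:

use serde_json::json;

fn main() {
    // dataSourceRef accepts the same snapshot reference that dataSource does,
    // plus non-core objects and, behind the alpha feature gate, a namespace.
    let claim_spec = json!({
        "accessModes": ["ReadWriteOnce"],
        "resources": { "requests": { "storage": "1Gi" } },
        "dataSourceRef": {
            "apiGroup": "snapshot.storage.k8s.io",
            "kind": "VolumeSnapshot",
            "name": "nightly-snapshot"    // illustrative snapshot name
        }
    });
    println!("{claim_spec}");
}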
#[serde(default, skip_serializing_if = "Option::is_none")] pub namespace: Option, } -/// resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +/// resources represents the minimum resources the volume should have. +/// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements +/// that are lower than previous value but must still be higher than capacity recorded in the +/// status field of the claim. +/// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecResources { - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -3897,19 +6095,26 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeCla /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -3917,46 +6122,65 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesEphemeralVolumeCla /// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFc { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// lun is Optional: FC target lun number #[serde(default, skip_serializing_if = "Option::is_none")] pub lun: Option, - /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// targetWWNs is Optional: FC target worldwide names (WWNs) #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetWWNs")] pub target_ww_ns: Option>, - /// wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + /// wwids Optional: FC volume world wide identifiers (wwids) + /// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. #[serde(default, skip_serializing_if = "Option::is_none")] pub wwids: Option>, } -/// flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. +/// flexVolume represents a generic volume resource that is +/// provisioned/attached using an exec based plugin. 
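The selector fields just above follow the standard label-selector shape, with matchLabels entries and matchExpressions requirements ANDed together; a small hedged sketch of the serialized form (serde_json only, illustrative label keys):

use serde_json::json;

fn main() {
    // Equivalent to: tier == cache AND environment in (production, staging).
    let selector = json!({
        "matchLabels": { "tier": "cache" },
        "matchExpressions": [
            { "key": "environment", "operator": "In", "values": ["production", "staging"] }
        ]
    });
    println!("{selector}");
}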
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFlexVolume { /// driver is the name of the driver to use for this volume. pub driver: String, - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// options is Optional: this field holds extra command options if any. #[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option>, - /// readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly is Optional: defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. + /// secretRef is Optional: secretRef is reference to the secret object containing + /// sensitive information to pass to the plugin scripts. This may be + /// empty if no secret object is specified. If the secret object + /// contains more than one secret, all secrets are passed to the plugin + /// scripts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, } -/// secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. +/// secretRef is Optional: secretRef is reference to the secret object containing +/// sensitive information to pass to the plugin scripts. This may be +/// empty if no secret object is specified. If the secret object +/// contains more than one secret, all secrets are passed to the plugin +/// scripts. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFlexVolumeSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3964,7 +6188,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFlexVolumeSecretRe /// flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFlocker { - /// datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + /// datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + /// should be considered as deprecated #[serde(default, skip_serializing_if = "Option::is_none", rename = "datasetName")] pub dataset_name: Option, /// datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset @@ -3972,27 +6197,46 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesFlocker { pub dataset_uuid: Option, } -/// gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk +/// gcePersistentDisk represents a GCE Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesGcePersistentDisk { - /// fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// partition is the partition in the volume that you want to mount. + /// If omitted, the default is to mount by volume name. + /// Examples: For volume /dev/sda1, you specify the partition as "1". + /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none")] pub partition: Option, - /// pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(rename = "pdName")] pub pd_name: String, - /// readOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } -/// gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. +/// gitRepo represents a git repository at a particular revision. +/// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +/// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +/// into the Pod's container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesGitRepo { - /// directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + /// directory is the target directory name. + /// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + /// git repository. Otherwise, if specified, the volume will contain the git repository in + /// the subdirectory with the given name. #[serde(default, skip_serializing_if = "Option::is_none")] pub directory: Option, /// repository is the URL @@ -4002,29 +6246,47 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesGitRepo { pub revision: Option, } -/// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md +/// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. +/// More info: https://examples.k8s.io/volumes/glusterfs/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesGlusterfs { - /// endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// endpoints is the endpoint name that details Glusterfs topology. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod pub endpoints: String, - /// path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// path is the Glusterfs volume path. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod pub path: String, - /// readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + /// Defaults to false. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } -/// hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. 
This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write. +/// hostPath represents a pre-existing file or directory on the host +/// machine that is directly exposed to the container. This is generally +/// used for system agents or other privileged things that are allowed +/// to see the host machine. Most containers will NOT need this. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath +/// --- +/// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not +/// mount host directories as read/write. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesHostPath { - /// path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// path of the directory on the host. + /// If the path is a symlink, it will follow the link to the real path. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath pub path: String, - /// type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// type for HostPath Volume + /// Defaults to "" + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] pub r#type: Option, } -/// iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md +/// iscsi represents an ISCSI Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. +/// More info: https://examples.k8s.io/volumes/iscsi/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesIscsi { /// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication @@ -4033,29 +6295,39 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesIscsi { /// chapAuthSession defines whether support iSCSI Session CHAP authentication #[serde(default, skip_serializing_if = "Option::is_none", rename = "chapAuthSession")] pub chap_auth_session: Option, - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// initiatorName is the custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + /// initiatorName is the custom iSCSI Initiator Name. + /// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + /// : will be created for the connection. #[serde(default, skip_serializing_if = "Option::is_none", rename = "initiatorName")] pub initiator_name: Option, /// iqn is the target iSCSI Qualified Name. pub iqn: String, - /// iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + /// iscsiInterface is the interface Name that uses an iSCSI transport. + /// Defaults to 'default' (tcp). #[serde(default, skip_serializing_if = "Option::is_none", rename = "iscsiInterface")] pub iscsi_interface: Option, /// lun represents iSCSI Target Lun number. pub lun: i32, - /// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + /// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + /// is other than default (typically TCP ports 860 and 3260). #[serde(default, skip_serializing_if = "Option::is_none")] pub portals: Option>, - /// readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// secretRef is the CHAP Secret for iSCSI target and initiator authentication #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + /// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + /// is other than default (typically TCP ports 860 and 3260). #[serde(rename = "targetPortal")] pub target_portal: String, } @@ -4063,30 +6335,41 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesIscsi { /// secretRef is the CHAP Secret for iSCSI target and initiator authentication #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesIscsiSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs +/// nfs represents an NFS mount on the host that shares a pod's lifetime +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesNfs { - /// path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// path that is exported by the NFS server. 
+ /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs pub path: String, - /// readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// readOnly here will force the NFS export to be mounted with read-only permissions. + /// Defaults to false. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// server is the hostname or IP address of the NFS server. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs pub server: String, } -/// persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims +/// persistentVolumeClaimVolumeSource represents a reference to a +/// PersistentVolumeClaim in the same namespace. +/// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPersistentVolumeClaim { - /// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + /// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[serde(rename = "claimName")] pub claim_name: String, - /// readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + /// readOnly Will force the ReadOnly setting in VolumeMounts. + /// Default false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } @@ -4094,7 +6377,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPersistentVolumeCl /// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPhotonPersistentDisk { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// pdID is the ID that identifies Photon Controller persistent disk @@ -4105,10 +6390,13 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPhotonPersistentDi /// portworxVolume represents a portworx volume attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPortworxVolume { - /// fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". 
Implicitly inferred to be "ext4" if unspecified. + /// fSType represents the filesystem type to mount + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// volumeID uniquely identifies a Portworx volume @@ -4119,7 +6407,12 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesPortworxVolume { /// projected items for all in one resources secrets, configmaps, and downward API #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjected { - /// defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode are the mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, /// sources is the list of volume projections @@ -4130,10 +6423,22 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjected { /// Projection that may be projected along with other supported volume types #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSources { - /// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. - /// Alpha, gated by the ClusterTrustBundleProjection feature gate. - /// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. - /// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. + /// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + /// of ClusterTrustBundle objects in an auto-updating file. + /// + /// + /// Alpha, gated by the ClusterTrustBundleProjection feature gate. + /// + /// + /// ClusterTrustBundle objects can either be selected by name, or by the + /// combination of signer name and a label selector. 
+ /// + /// + /// Kubelet performs aggressive normalization of the PEM contents written + /// into the pod filesystem. Esoteric PEM features such as inter-block + /// comments and block headers are stripped. Certificates are deduplicated. + /// The ordering of certificates within the file is arbitrary, and Kubelet + /// may change the order over time. #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterTrustBundle")] pub cluster_trust_bundle: Option, /// configMap information about the configMap data to project @@ -4150,47 +6455,79 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSources { pub service_account_token: Option, } -/// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. -/// Alpha, gated by the ClusterTrustBundleProjection feature gate. -/// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. -/// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. +/// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field +/// of ClusterTrustBundle objects in an auto-updating file. +/// +/// +/// Alpha, gated by the ClusterTrustBundleProjection feature gate. +/// +/// +/// ClusterTrustBundle objects can either be selected by name, or by the +/// combination of signer name and a label selector. +/// +/// +/// Kubelet performs aggressive normalization of the PEM contents written +/// into the pod filesystem. Esoteric PEM features such as inter-block +/// comments and block headers are stripped. Certificates are deduplicated. +/// The ordering of certificates within the file is arbitrary, and Kubelet +/// may change the order over time. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesClusterTrustBundle { - /// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". + /// Select all ClusterTrustBundles that match this label selector. Only has + /// effect if signerName is set. Mutually-exclusive with name. If unset, + /// interpreted as "match nothing". If set but empty, interpreted as "match + /// everything". #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector. + /// Select a single ClusterTrustBundle by object name. Mutually-exclusive + /// with signerName and labelSelector. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles. + /// If true, don't block pod startup if the referenced ClusterTrustBundle(s) + /// aren't available. 
If using name, then the named ClusterTrustBundle is + /// allowed not to exist. If using signerName, then the combination of + /// signerName and labelSelector is allowed to match zero + /// ClusterTrustBundles. #[serde(default, skip_serializing_if = "Option::is_none")] pub optional: Option, /// Relative path from the volume root to write the bundle. pub path: String, - /// Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated. + /// Select all ClusterTrustBundles that match this signer name. + /// Mutually-exclusive with name. The contents of all selected + /// ClusterTrustBundles will be unified and deduplicated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "signerName")] pub signer_name: Option, } -/// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". +/// Select all ClusterTrustBundles that match this label selector. Only has +/// effect if signerName is set. Mutually-exclusive with name. If unset, +/// interpreted as "match nothing". If set but empty, interpreted as "match +/// everything". #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesClusterTrustBundleLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesClusterTrustBundleLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -4198,10 +6535,18 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesCl /// configMap information about the configMap data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesConfigMap { - /// items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// ConfigMap will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the ConfigMap, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional specify whether the ConfigMap or its keys must be defined @@ -4214,10 +6559,18 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesCo pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesConfigMapItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. 
+ /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } @@ -4235,12 +6588,18 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesDo /// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits used to set permissions on this file, must be an octal value + /// between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, /// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' pub path: String, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, } @@ -4256,7 +6615,8 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesDo pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesDownwardApiItemsResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -4272,10 +6632,18 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesDo /// secret information about the secret data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesSecret { - /// items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// Secret will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the Secret, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional field specify whether the Secret or its key must be defined @@ -4288,78 +6656,124 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesSe pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesSecretItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// serviceAccountToken is information about the serviceAccountToken data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesProjectedSourcesServiceAccountToken { - /// audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + /// audience is the intended audience of the token. 
A recipient of a token + /// must identify itself with an identifier specified in the audience of the + /// token, and otherwise should reject the token. The audience defaults to the + /// identifier of the apiserver. #[serde(default, skip_serializing_if = "Option::is_none")] pub audience: Option, - /// expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + /// expirationSeconds is the requested duration of validity of the service + /// account token. As the token approaches expiration, the kubelet volume + /// plugin will proactively rotate the service account token. The kubelet will + /// start trying to rotate the token if the token is older than 80 percent of + /// its time to live or if the token is older than 24 hours.Defaults to 1 hour + /// and must be at least 10 minutes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "expirationSeconds")] pub expiration_seconds: Option, - /// path is the path relative to the mount point of the file to project the token into. + /// path is the path relative to the mount point of the file to project the + /// token into. pub path: String, } /// quobyte represents a Quobyte mount on the host that shares a pod's lifetime #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesQuobyte { - /// group to map volume access to Default is no group + /// group to map volume access to + /// Default is no group #[serde(default, skip_serializing_if = "Option::is_none")] pub group: Option, - /// readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + /// readOnly here will force the Quobyte volume to be mounted with read-only permissions. + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + /// registry represents a single or multiple Quobyte Registry services + /// specified as a string as host:port pair (multiple entries are separated with commas) + /// which acts as the central registry for volumes pub registry: String, - /// tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + /// tenant owning the given Quobyte volume in the Backend + /// Used with dynamically provisioned Quobyte volumes, value is set by the plugin #[serde(default, skip_serializing_if = "Option::is_none")] pub tenant: Option, - /// user to map volume access to Defaults to serivceaccount user + /// user to map volume access to + /// Defaults to serivceaccount user #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, /// volume is a string that references an already created Quobyte volume by name. pub volume: String, } -/// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md +/// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. +/// More info: https://examples.k8s.io/volumes/rbd/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesRbd { - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// image is the rados image name. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it pub image: String, - /// keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// keyring is the path to key ring for RBDUser. + /// Default is /etc/ceph/keyring. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub keyring: Option, - /// monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// monitors is a collection of Ceph monitors. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it pub monitors: Vec, - /// pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// pool is the rados pool name. + /// Default is rbd. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub pool: Option, - /// readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// secretRef is name of the authentication secret for RBDUser. If provided + /// overrides keyring. + /// Default is nil. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// user is the rados user name. 
+ /// Default is admin. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, } -/// secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it +/// secretRef is name of the authentication secret for RBDUser. If provided +/// overrides keyring. +/// Default is nil. +/// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesRbdSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -4367,7 +6781,10 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesRbdSecretRef { /// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesScaleIo { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". + /// Default is "xfs". #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// gateway is the host address of the ScaleIO API Gateway. @@ -4375,16 +6792,19 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesScaleIo { /// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. #[serde(default, skip_serializing_if = "Option::is_none", rename = "protectionDomain")] pub protection_domain: Option, - /// readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + /// secretRef references to the secret for ScaleIO user and other + /// sensitive information. If this is not provided, Login operation will fail. #[serde(rename = "secretRef")] pub secret_ref: JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesScaleIoSecretRef, /// sslEnabled Flag enable/disable SSL communication with Gateway, default false #[serde(default, skip_serializing_if = "Option::is_none", rename = "sslEnabled")] pub ssl_enabled: Option, - /// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + /// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + /// Default is ThinProvisioned. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "storageMode")] pub storage_mode: Option, /// storagePool is the ScaleIO Storage Pool associated with the protection domain. @@ -4392,32 +6812,50 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesScaleIo { pub storage_pool: Option, /// system is the name of the storage system as configured in ScaleIO. pub system: String, - /// volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + /// volumeName is the name of a volume already created in the ScaleIO system + /// that is associated with this volume source. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeName")] pub volume_name: Option, } -/// secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. +/// secretRef references to the secret for ScaleIO user and other +/// sensitive information. If this is not provided, Login operation will fail. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesScaleIoSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret +/// secret represents a secret that should populate this volume. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesSecret { - /// defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode is Optional: mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values + /// for mode bits. Defaults to 0644. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, - /// items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. 
Paths must be relative and may not contain the '..' path or start with '..'. + /// items If unspecified, each key-value pair in the Data field of the referenced + /// Secret will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the Secret, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, /// optional field specify whether the Secret or its keys must be defined #[serde(default, skip_serializing_if = "Option::is_none")] pub optional: Option, - /// secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + /// secretName is the name of the secret in the pod's namespace to use. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")] pub secret_name: Option, } @@ -4427,37 +6865,58 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesSecret { pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesSecretItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesStorageos { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + /// secretRef specifies the secret to use for obtaining the StorageOS API + /// credentials. If not specified, default values will be attempted. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + /// volumeName is the human-readable name of the StorageOS volume. Volume + /// names are only unique within a namespace. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeName")] pub volume_name: Option, - /// volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + /// volumeNamespace specifies the scope of the volume within StorageOS. If no + /// namespace is specified then the Pod's namespace will be used. This allows the + /// Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + /// Set VolumeName to any name to override the default behaviour. + /// Set to "default" if you are not using namespaces within StorageOS. + /// Namespaces that do not pre-exist within StorageOS will be created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeNamespace")] pub volume_namespace: Option, } -/// secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. +/// secretRef specifies the secret to use for obtaining the StorageOS API +/// credentials. If not specified, default values will be attempted. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesStorageosSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -4465,7 +6924,9 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesStorageosSecretRef /// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesVsphereVolume { - /// fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. @@ -4479,17 +6940,42 @@ pub struct JobSetReplicatedJobsTemplateSpecTemplateSpecVolumesVsphereVolume { pub volume_path: String, } -/// SuccessPolicy configures when to declare the JobSet as succeeded. The JobSet is always declared succeeded if all jobs in the set finished with status complete. +/// StartupPolicy, if set, configures in what order jobs must be started +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct JobSetStartupPolicy { + /// StartupPolicyOrder determines the startup order of the ReplicatedJobs. + /// AnyOrder means to start replicated jobs in any order. + /// InOrder means to start them as they are listed in the JobSet. A ReplicatedJob is started only + /// when all the jobs of the previous one are ready. + #[serde(rename = "startupPolicyOrder")] + pub startup_policy_order: JobSetStartupPolicyStartupPolicyOrder, +} + +/// StartupPolicy, if set, configures in what order jobs must be started +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum JobSetStartupPolicyStartupPolicyOrder { + AnyOrder, + InOrder, +} + +/// SuccessPolicy configures when to declare the JobSet as +/// succeeded. +/// The JobSet is always declared succeeded if all jobs in the set +/// finished with status complete. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetSuccessPolicy { /// Operator determines either All or Any of the selected jobs should succeed to consider the JobSet successful pub operator: JobSetSuccessPolicyOperator, - /// TargetReplicatedJobs are the names of the replicated jobs the operator will apply to. A null or empty list will apply to all replicatedJobs. + /// TargetReplicatedJobs are the names of the replicated jobs the operator will apply to. + /// A null or empty list will apply to all replicatedJobs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetReplicatedJobs")] pub target_replicated_jobs: Option>, } -/// SuccessPolicy configures when to declare the JobSet as succeeded. The JobSet is always declared succeeded if all jobs in the set finished with status complete. +/// SuccessPolicy configures when to declare the JobSet as +/// succeeded. +/// The JobSet is always declared succeeded if all jobs in the set +/// finished with status complete. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum JobSetSuccessPolicyOperator { All, @@ -4509,31 +6995,71 @@ pub struct JobSetStatus { pub restarts: Option, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. 
For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct JobSetStatusConditions { - /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + /// lastTransitionTime is the last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. #[serde(rename = "lastTransitionTime")] pub last_transition_time: String, - /// message is a human readable message indicating details about the transition. This may be an empty string. + /// message is a human readable message indicating details about the transition. + /// This may be an empty string. pub message: String, - /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + /// observedGeneration represents the .metadata.generation that the condition was set based upon. + /// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + /// with respect to the current state of the instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] pub observed_generation: Option, - /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. + /// Producers of specific condition types may define expected values and meanings for this field, + /// and whether the values are considered a guaranteed API. + /// The value should be a CamelCase string. + /// This field may not be empty. pub reason: String, /// status of the condition, one of True, False, Unknown. pub status: JobSetStatusConditionsStatus, - /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + /// type of condition in CamelCase or in foo.example.com/CamelCase. + /// --- + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + /// useful (see .node.status.conditions), the ability to deconflict is important. 
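// A minimal usage sketch of the startup/success policy types introduced above
// (illustrative only, not generated code): the module path, the Option<Vec<String>>
// shape of targetReplicatedJobs, and the `Any` operator variant are assumptions
// inferred from the doc comments and the crate's jobset_x_k8s_io/v1alpha2 layout.
use kube_custom_resources_rs::jobset_x_k8s_io::v1alpha2::jobsets::{
    JobSetStartupPolicy, JobSetStartupPolicyStartupPolicyOrder,
    JobSetSuccessPolicy, JobSetSuccessPolicyOperator,
};

fn example_policies() -> (JobSetStartupPolicy, JobSetSuccessPolicy) {
    // Start each ReplicatedJob only after all jobs of the previous one are ready.
    let startup = JobSetStartupPolicy {
        startup_policy_order: JobSetStartupPolicyStartupPolicyOrder::InOrder,
    };
    // Declare the JobSet succeeded as soon as any "driver" replicated job completes.
    let success = JobSetSuccessPolicy {
        operator: JobSetSuccessPolicyOperator::Any,
        target_replicated_jobs: Some(vec!["driver".to_string()]),
    };
    (startup, success)
}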
+ /// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) #[serde(rename = "type")] pub r#type: String, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum JobSetStatusConditionsStatus { True, diff --git a/kube-custom-resources-rs/src/karpenter_k8s_aws/v1beta1/ec2nodeclasses.rs b/kube-custom-resources-rs/src/karpenter_k8s_aws/v1beta1/ec2nodeclasses.rs index da4e75e78..6703d70b8 100644 --- a/kube-custom-resources-rs/src/karpenter_k8s_aws/v1beta1/ec2nodeclasses.rs +++ b/kube-custom-resources-rs/src/karpenter_k8s_aws/v1beta1/ec2nodeclasses.rs @@ -20,6 +20,9 @@ pub struct EC2NodeClassSpec { /// AMISelectorTerms is a list of or ami selector terms. The terms are ORed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "amiSelectorTerms")] pub ami_selector_terms: Option>, + /// AssociatePublicIPAddress controls if public IP addresses are assigned to instances that are launched with the nodeclass. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "associatePublicIPAddress")] + pub associate_public_ip_address: Option, /// BlockDeviceMappings to be applied to provisioned nodes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockDeviceMappings")] pub block_device_mappings: Option>, diff --git a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/aliases.rs b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/aliases.rs index 58b82680d..264ebda7c 100644 --- a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/aliases.rs +++ b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/aliases.rs @@ -12,35 +12,72 @@ use serde::{Serialize, Deserialize}; #[kube(status = "AliasStatus")] #[kube(schema = "disabled")] pub struct AliasSpec { - /// Specifies the alias name. This value must begin with alias/ followed by a name, such as alias/ExampleAlias. - /// The AliasName value must be string of 1-256 characters. It can contain only alphanumeric characters, forward slashes (/), underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). 
+ /// Specifies the alias name. This value must begin with alias/ followed by a + /// name, such as alias/ExampleAlias. + /// + /// + /// The AliasName value must be string of 1-256 characters. It can contain only + /// alphanumeric characters, forward slashes (/), underscores (_), and dashes + /// (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is + /// reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). pub name: String, - /// Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). The KMS key must be in the same Amazon Web Services Region. - /// A valid key ID is required. If you supply a null or empty string value, this operation returns an error. - /// For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) in the Key Management Service Developer Guide . - /// Specify the key ID or key ARN of the KMS key. - /// For example: - /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - /// * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - /// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + /// Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). + /// The KMS key must be in the same Amazon Web Services Region. + /// + /// + /// A valid key ID is required. If you supply a null or empty string value, this + /// operation returns an error. + /// + /// + /// For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) + /// in the Key Management Service Developer Guide . + /// + /// + /// Specify the key ID or key ARN of the KMS key. + /// + /// + /// For example: + /// + /// + /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. 
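// A hedged sketch of an AliasSpec built from the fields documented above
// (illustrative only, not generated code): the module path, the Option<String>
// and Option<AliasTargetKeyRef> field shapes, and the `name` field on the `from`
// block follow the ACK conventions shown in this patch rather than code visible here.
use kube_custom_resources_rs::kms_services_k8s_aws::v1alpha1::aliases::{
    AliasSpec, AliasTargetKeyRef, AliasTargetKeyRefFrom,
};

fn example_alias_spec() -> AliasSpec {
    AliasSpec {
        // Must begin with "alias/"; the "alias/aws/" prefix is reserved.
        name: "alias/ExampleAlias".to_string(),
        // Either name the key directly by ID/ARN via targetKeyID ...
        target_key_id: None,
        // ... or reference a Key resource managed in the same cluster.
        target_key_ref: Some(AliasTargetKeyRef {
            from: Some(AliasTargetKeyRefFrom {
                name: Some("example-kms-key".to_string()),
            }),
        }),
    }
}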
#[serde(default, skip_serializing_if = "Option::is_none", rename = "targetKeyID")] pub target_key_id: Option<String>, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetKeyRef")] pub target_key_ref: Option<AliasTargetKeyRef>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AliasTargetKeyRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option<AliasTargetKeyRefFrom>, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AliasTargetKeyRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -50,28 +87,45 @@ pub struct AliasTargetKeyRefFrom { /// AliasStatus defines the observed state of Alias #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AliasStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for
the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AliasStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AliasStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/grants.rs b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/grants.rs index 541325139..6ed3f2997 100644 --- a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/grants.rs +++ b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/grants.rs @@ -13,53 +13,172 @@ use std::collections::BTreeMap; #[kube(status = "GrantStatus")] #[kube(schema = "disabled")] pub struct GrantSpec { - /// Specifies a grant constraint. - /// KMS supports the EncryptionContextEquals and EncryptionContextSubset grant constraints. Each constraint value can include up to 8 encryption context pairs. The encryption context value in each constraint cannot exceed 384 characters. For information about grant constraints, see Using grant constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) in the Key Management Service Developer Guide. For more information about encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the Key Management Service Developer Guide . 
- /// The encryption context grant constraints allow the permissions in the grant only when the encryption context in the request matches (EncryptionContextEquals) or includes (EncryptionContextSubset) the encryption context specified in this structure. - /// The encryption context grant constraints are supported only on grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) that include an EncryptionContext parameter, such as cryptographic operations on symmetric encryption KMS keys. Grants with grant constraints can include the DescribeKey and RetireGrant operations, but the constraint doesn't apply to these operations. If a grant with a grant constraint includes the CreateGrant operation, the constraint requires that any grants created with the CreateGrant permission have an equally strict or stricter encryption context constraint. - /// You cannot use an encryption context grant constraint for cryptographic operations with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption context. + /// Specifies a grant constraint. + /// + /// + /// KMS supports the EncryptionContextEquals and EncryptionContextSubset grant + /// constraints. Each constraint value can include up to 8 encryption context + /// pairs. The encryption context value in each constraint cannot exceed 384 + /// characters. For information about grant constraints, see Using grant constraints + /// (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) + /// in the Key Management Service Developer Guide. For more information about + /// encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + /// in the Key Management Service Developer Guide . + /// + /// + /// The encryption context grant constraints allow the permissions in the grant + /// only when the encryption context in the request matches (EncryptionContextEquals) + /// or includes (EncryptionContextSubset) the encryption context specified in + /// this structure. + /// + /// + /// The encryption context grant constraints are supported only on grant operations + /// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + /// that include an EncryptionContext parameter, such as cryptographic operations + /// on symmetric encryption KMS keys. Grants with grant constraints can include + /// the DescribeKey and RetireGrant operations, but the constraint doesn't apply + /// to these operations. If a grant with a grant constraint includes the CreateGrant + /// operation, the constraint requires that any grants created with the CreateGrant + /// permission have an equally strict or stricter encryption context constraint. + /// + /// + /// You cannot use an encryption context grant constraint for cryptographic operations + /// with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption + /// context. #[serde(default, skip_serializing_if = "Option::is_none")] pub constraints: Option, - /// A list of grant tokens. - /// Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) in the Key Management Service Developer Guide. 
+ /// A list of grant tokens. + /// + /// + /// Use a grant token when your permission to call this operation comes from + /// a new grant that has not yet achieved eventual consistency. For more information, + /// see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + /// and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + /// in the Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantTokens")] pub grant_tokens: Option>, - /// The identity that gets the permissions specified in the grant. - /// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of an Amazon Web Services principal. Valid Amazon Web Services principals include Amazon Web Services accounts (root), IAM users, IAM roles, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) in the Example ARNs section of the Amazon Web Services General Reference. + /// The identity that gets the permissions specified in the grant. + /// + /// + /// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + /// of an Amazon Web Services principal. Valid Amazon Web Services principals + /// include Amazon Web Services accounts (root), IAM users, IAM roles, federated + /// users, and assumed role users. For examples of the ARN syntax to use for + /// specifying a principal, see Amazon Web Services Identity and Access Management + /// (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + /// in the Example ARNs section of the Amazon Web Services General Reference. #[serde(rename = "granteePrincipal")] pub grantee_principal: String, - /// Identifies the KMS key for the grant. The grant gives principals permission to use this KMS key. - /// Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different Amazon Web Services account, you must use the key ARN. - /// For example: - /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - /// * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - /// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + /// Identifies the KMS key for the grant. The grant gives principals permission + /// to use this KMS key. + /// + /// + /// Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different + /// Amazon Web Services account, you must use the key ARN. + /// + /// + /// For example: + /// + /// + /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] pub key_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyRef")] pub key_ref: Option, - /// A friendly name for the grant. Use this value to prevent the unintended creation of duplicate grants when retrying this request. - /// When this value is absent, all CreateGrant requests result in a new grant with a unique GrantId even if all the supplied parameters are identical. This can result in unintended duplicates when you retry the CreateGrant request. - /// When this value is present, you can retry a CreateGrant request with identical parameters; if the grant already exists, the original GrantId is returned without creating a new grant. Note that the returned grant token is unique with every CreateGrant request, even when a duplicate GrantId is returned. All grant tokens for the same grant ID can be used interchangeably. + /// A friendly name for the grant. Use this value to prevent the unintended creation + /// of duplicate grants when retrying this request. + /// + /// + /// When this value is absent, all CreateGrant requests result in a new grant + /// with a unique GrantId even if all the supplied parameters are identical. + /// This can result in unintended duplicates when you retry the CreateGrant request. + /// + /// + /// When this value is present, you can retry a CreateGrant request with identical + /// parameters; if the grant already exists, the original GrantId is returned + /// without creating a new grant. Note that the returned grant token is unique + /// with every CreateGrant request, even when a duplicate GrantId is returned. + /// All grant tokens for the same grant ID can be used interchangeably. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// A list of operations that the grant permits. - /// This list must include only operations that are permitted in a grant. Also, the operation must be supported on the KMS key. For example, you cannot create a grant for a symmetric encryption KMS key that allows the Sign operation, or a grant for an asymmetric KMS key that allows the GenerateDataKey operation. If you try, KMS returns a ValidationError exception. For details, see Grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) in the Key Management Service Developer Guide. + /// A list of operations that the grant permits. + /// + /// + /// This list must include only operations that are permitted in a grant. Also, + /// the operation must be supported on the KMS key. For example, you cannot create + /// a grant for a symmetric encryption KMS key that allows the Sign operation, + /// or a grant for an asymmetric KMS key that allows the GenerateDataKey operation. + /// If you try, KMS returns a ValidationError exception. For details, see Grant + /// operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + /// in the Key Management Service Developer Guide. 
pub operations: Vec, - /// The principal that has permission to use the RetireGrant operation to retire the grant. - /// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of an Amazon Web Services principal. Valid Amazon Web Services principals include Amazon Web Services accounts (root), IAM users, federated users, and assumed role users. For examples of the ARN syntax to use for specifying a principal, see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) in the Example ARNs section of the Amazon Web Services General Reference. - /// The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) in the Key Management Service Developer Guide. + /// The principal that has permission to use the RetireGrant operation to retire + /// the grant. + /// + /// + /// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + /// of an Amazon Web Services principal. Valid Amazon Web Services principals + /// include Amazon Web Services accounts (root), IAM users, federated users, + /// and assumed role users. For examples of the ARN syntax to use for specifying + /// a principal, see Amazon Web Services Identity and Access Management (IAM) + /// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + /// in the Example ARNs section of the Amazon Web Services General Reference. + /// + /// + /// The grant determines the retiring principal. Other principals might have + /// permission to retire the grant or revoke the grant. For details, see RevokeGrant + /// and Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) + /// in the Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "retiringPrincipal")] pub retiring_principal: Option, } -/// Specifies a grant constraint. -/// KMS supports the EncryptionContextEquals and EncryptionContextSubset grant constraints. Each constraint value can include up to 8 encryption context pairs. The encryption context value in each constraint cannot exceed 384 characters. For information about grant constraints, see Using grant constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) in the Key Management Service Developer Guide. For more information about encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the Key Management Service Developer Guide . -/// The encryption context grant constraints allow the permissions in the grant only when the encryption context in the request matches (EncryptionContextEquals) or includes (EncryptionContextSubset) the encryption context specified in this structure. -/// The encryption context grant constraints are supported only on grant operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) that include an EncryptionContext parameter, such as cryptographic operations on symmetric encryption KMS keys. 
Grants with grant constraints can include the DescribeKey and RetireGrant operations, but the constraint doesn't apply to these operations. If a grant with a grant constraint includes the CreateGrant operation, the constraint requires that any grants created with the CreateGrant permission have an equally strict or stricter encryption context constraint. -/// You cannot use an encryption context grant constraint for cryptographic operations with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption context. +/// Specifies a grant constraint. +/// +/// +/// KMS supports the EncryptionContextEquals and EncryptionContextSubset grant +/// constraints. Each constraint value can include up to 8 encryption context +/// pairs. The encryption context value in each constraint cannot exceed 384 +/// characters. For information about grant constraints, see Using grant constraints +/// (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) +/// in the Key Management Service Developer Guide. For more information about +/// encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +/// in the Key Management Service Developer Guide . +/// +/// +/// The encryption context grant constraints allow the permissions in the grant +/// only when the encryption context in the request matches (EncryptionContextEquals) +/// or includes (EncryptionContextSubset) the encryption context specified in +/// this structure. +/// +/// +/// The encryption context grant constraints are supported only on grant operations +/// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) +/// that include an EncryptionContext parameter, such as cryptographic operations +/// on symmetric encryption KMS keys. Grants with grant constraints can include +/// the DescribeKey and RetireGrant operations, but the constraint doesn't apply +/// to these operations. If a grant with a grant constraint includes the CreateGrant +/// operation, the constraint requires that any grants created with the CreateGrant +/// permission have an equally strict or stricter encryption context constraint. +/// +/// +/// You cannot use an encryption context grant constraint for cryptographic operations +/// with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption +/// context. 
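// A short sketch of the encryption-context constraint semantics described above
// (illustrative only, not generated code): the BTreeMap<String, String> shape of
// the two fields is an assumption inferred from the CRD schema.
use std::collections::BTreeMap;
use kube_custom_resources_rs::kms_services_k8s_aws::v1alpha1::grants::GrantConstraints;

fn example_constraints() -> GrantConstraints {
    let mut ctx = BTreeMap::new();
    ctx.insert("department".to_string(), "finance".to_string());
    GrantConstraints {
        // Permit the grant only when the request's encryption context *includes* this pair.
        encryption_context_subset: Some(ctx),
        // Leave unset unless the context must match exactly.
        encryption_context_equals: None,
    }
}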
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantConstraints { #[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionContextEquals")] @@ -68,16 +187,24 @@ pub struct GrantConstraints { pub encryption_context_subset: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantKeyRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantKeyRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -87,36 +214,61 @@ pub struct GrantKeyRefFrom { /// GrantStatus defines the observed state of Grant #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The unique identifier for the grant. - /// You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation. + /// The unique identifier for the grant. + /// + /// + /// You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantID")] pub grant_id: Option, - /// The grant token. - /// Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. 
For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) in the Key Management Service Developer Guide. + /// The grant token. + /// + /// + /// Use a grant token when your permission to call this operation comes from + /// a new grant that has not yet achieved eventual consistency. For more information, + /// see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + /// and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + /// in the Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantToken")] pub grant_token: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GrantStatusConditions { /// Last time the condition transitioned from one status to another. 
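// A hedged end-to-end sketch of a GrantSpec assembled from the fields documented
// in the grants.rs hunks above (illustrative only, not generated code): the module
// path, the Option/Vec<String> field shapes, and the `name` field on the `from`
// block are assumptions following the ACK conventions shown in this patch.
use kube_custom_resources_rs::kms_services_k8s_aws::v1alpha1::grants::{
    GrantKeyRef, GrantKeyRefFrom, GrantSpec,
};

fn example_grant_spec() -> GrantSpec {
    GrantSpec {
        // The principal that receives the permissions, given as an ARN.
        grantee_principal: "arn:aws:iam::111122223333:role/ExampleRole".to_string(),
        // Only operations that are valid for the referenced key may be listed.
        operations: vec![
            "Encrypt".to_string(),
            "Decrypt".to_string(),
            "DescribeKey".to_string(),
        ],
        // Reference the in-cluster Key resource instead of hard-coding a key ID.
        key_id: None,
        key_ref: Some(GrantKeyRef {
            from: Some(GrantKeyRefFrom {
                name: Some("example-kms-key".to_string()),
            }),
        }),
        // A friendly name guards against duplicate grants on retried requests.
        name: Some("example-grant".to_string()),
        constraints: None,
        grant_tokens: None,
        retiring_principal: None,
    }
}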
diff --git a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/keys.rs b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/keys.rs index 5c459120c..459c3df49 100644 --- a/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/keys.rs +++ b/kube-custom-resources-rs/src/kms_services_k8s_aws/v1alpha1/keys.rs @@ -12,82 +12,265 @@ use serde::{Serialize, Deserialize}; #[kube(status = "KeyStatus")] #[kube(schema = "disabled")] pub struct KeySpec { - /// A flag to indicate whether to bypass the key policy lockout safety check. - /// Setting this value to true increases the risk that the KMS key becomes unmanageable. Do not set this value to true indiscriminately. - /// For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section in the Key Management Service Developer Guide . - /// Use this parameter only when you include a policy in the request and you intend to prevent the principal that is making the request from making a subsequent PutKeyPolicy request on the KMS key. - /// The default value is false. + /// A flag to indicate whether to bypass the key policy lockout safety check. + /// + /// + /// Setting this value to true increases the risk that the KMS key becomes unmanageable. + /// Do not set this value to true indiscriminately. + /// + /// + /// For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + /// section in the Key Management Service Developer Guide . + /// + /// + /// Use this parameter only when you include a policy in the request and you + /// intend to prevent the principal that is making the request from making a + /// subsequent PutKeyPolicy request on the KMS key. + /// + /// + /// The default value is false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "bypassPolicyLockoutSafetyCheck")] pub bypass_policy_lockout_safety_check: Option, - /// Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) and the key material in its associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs, each in a different Availability Zone in the Region. - /// This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store. - /// To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - /// The response includes the custom key store ID and the ID of the CloudHSM cluster. - /// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a single-tenant key store. + /// Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + /// and the key material in its associated CloudHSM cluster. To create a KMS + /// key in a custom key store, you must also specify the Origin parameter with + /// a value of AWS_CLOUDHSM. 
The CloudHSM cluster that is associated with the + /// custom key store must have at least two active HSMs, each in a different + /// Availability Zone in the Region. + /// + /// + /// This parameter is valid only for symmetric encryption KMS keys in a single + /// Region. You cannot create any other type of KMS key in a custom key store. + /// + /// + /// To find the ID of a custom key store, use the DescribeCustomKeyStores operation. + /// + /// + /// The response includes the custom key store ID and the ID of the CloudHSM + /// cluster. + /// + /// + /// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + /// feature in KMS, which combines the convenience and extensive integration + /// of KMS with the isolation and control of a single-tenant key store. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customKeyStoreID")] pub custom_key_store_id: Option, - /// A description of the KMS key. - /// Use a description that helps you decide whether the KMS key is appropriate for a task. The default value is an empty string (no description). - /// To set or change the description after the key is created, use UpdateKeyDescription. + /// A description of the KMS key. + /// + /// + /// Use a description that helps you decide whether the KMS key is appropriate + /// for a task. The default value is an empty string (no description). + /// + /// + /// To set or change the description after the key is created, use UpdateKeyDescription. #[serde(default, skip_serializing_if = "Option::is_none")] pub description: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableKeyRotation")] pub enable_key_rotation: Option, - /// Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) in the Key Management Service Developer Guide . - /// The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm), kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) in the Key Management Service Developer Guide . - /// Amazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration) use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys. 
- /// KMS supports the following key specs for KMS keys: - /// * Symmetric encryption key (default) SYMMETRIC_DEFAULT - /// * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512 - /// * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 - /// * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) - /// * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. - /// * SM2 key pairs (China Regions only) SM2 + /// Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, + /// creates a KMS key with a 256-bit AES-GCM key that is used for encryption + /// and decryption, except in China Regions, where it creates a 128-bit symmetric + /// key that uses SM4 encryption. For help choosing a key spec for your KMS key, + /// see Choosing a KMS key type (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose) + /// in the Key Management Service Developer Guide . + /// + /// + /// The KeySpec determines whether the KMS key contains a symmetric key or an + /// asymmetric key pair. It also determines the cryptographic algorithms that + /// the KMS key supports. You can't change the KeySpec after the KMS key is created. + /// To further restrict the algorithms that can be used with the KMS key, use + /// a condition key in its key policy or IAM policy. For more information, see + /// kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm), + /// kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm) + /// or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm) + /// in the Key Management Service Developer Guide . + /// + /// + /// Amazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration) + /// use symmetric encryption KMS keys to protect your data. These services do + /// not support asymmetric KMS keys or HMAC KMS keys. + /// + /// + /// KMS supports the following key specs for KMS keys: + /// + /// + /// * Symmetric encryption key (default) SYMMETRIC_DEFAULT + /// + /// + /// * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512 + /// + /// + /// * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 + /// + /// + /// * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) + /// ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) + /// + /// + /// * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1), + /// commonly used for cryptocurrencies. + /// + /// + /// * SM2 key pairs (China Regions only) SM2 #[serde(default, skip_serializing_if = "Option::is_none", rename = "keySpec")] pub key_spec: Option, - /// Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created. - /// Select only one valid value. - /// * For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. - /// * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC. 
- /// * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY. - /// * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY. - /// * For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY. + /// Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + /// for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. + /// This parameter is optional when you are creating a symmetric encryption KMS + /// key; otherwise, it is required. You can't change the KeyUsage value after + /// the KMS key is created. + /// + /// + /// Select only one valid value. + /// + /// + /// * For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. + /// + /// + /// * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC. + /// + /// + /// * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT + /// or SIGN_VERIFY. + /// + /// + /// * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY. + /// + /// + /// * For asymmetric KMS keys with SM2 key material (China Regions only), + /// specify ENCRYPT_DECRYPT or SIGN_VERIFY. #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyUsage")] pub key_usage: Option, - /// Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key. - /// For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False. - /// This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the Key Management Service Developer Guide. - /// This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation. - /// You can create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. However, you cannot create a multi-Region key in a custom key store. + /// Creates a multi-Region primary key that you can replicate into other Amazon + /// Web Services Regions. You cannot change this value after you create the KMS + /// key. + /// + /// + /// For a multi-Region key, set this parameter to True. For a single-Region KMS + /// key, omit this parameter or set it to False. The default value is False. + /// + /// + /// This operation supports multi-Region keys, an KMS feature that lets you create + /// multiple interoperable KMS keys in different Amazon Web Services Regions. + /// Because these KMS keys have the same key ID, key material, and other metadata, + /// you can use them interchangeably to encrypt data in one Amazon Web Services + /// Region and decrypt it in a different Amazon Web Services Region without re-encrypting + /// the data or making a cross-Region call. 
For more information about multi-Region + /// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) + /// in the Key Management Service Developer Guide. + /// + /// + /// This value creates a primary key, not a replica. To create a replica key, + /// use the ReplicateKey operation. + /// + /// + /// You can create a multi-Region version of a symmetric encryption KMS key, + /// an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material. + /// However, you cannot create a multi-Region key in a custom key store. #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiRegion")] pub multi_region: Option, - /// The source of the key material for the KMS key. You cannot change the origin after you create the KMS key. The default is AWS_KMS, which means that KMS creates the key material. - /// To create a KMS key with no key material (for imported key material), set the value to EXTERNAL. For more information about importing key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in the Key Management Service Developer Guide. This value is valid only for symmetric encryption KMS keys. - /// To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) and create its key material in the associated CloudHSM cluster, set this value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify the custom key store. This value is valid only for symmetric encryption KMS keys. + /// The source of the key material for the KMS key. You cannot change the origin + /// after you create the KMS key. The default is AWS_KMS, which means that KMS + /// creates the key material. + /// + /// + /// To create a KMS key with no key material (for imported key material), set + /// the value to EXTERNAL. For more information about importing key material + /// into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) + /// in the Key Management Service Developer Guide. This value is valid only for + /// symmetric encryption KMS keys. + /// + /// + /// To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + /// and create its key material in the associated CloudHSM cluster, set this + /// value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to + /// identify the custom key store. This value is valid only for symmetric encryption + /// KMS keys. #[serde(default, skip_serializing_if = "Option::is_none")] pub origin: Option, - /// The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) in the Key Management Service Developer Guide. - /// If you provide a key policy, it must meet the following criteria: - /// * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy must allow the principal that is making the CreateKey request to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. 
For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) section of the Key Management Service Developer Guide . - /// * Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal (for example, an IAM user or role), you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) in the Amazon Web Services Identity and Access Management User Guide. - /// A key policy document can include only the following characters: - /// * Printable ASCII characters from the space character (\u0020) through the end of the ASCII character range. - /// * Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF). - /// * The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special characters - /// For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in the Identity and Access Management User Guide . + /// The key policy to attach to the KMS key. If you do not specify a key policy, + /// KMS attaches a default key policy to the KMS key. For more information, see + /// Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + /// in the Key Management Service Developer Guide. + /// + /// + /// If you provide a key policy, it must meet the following criteria: + /// + /// + /// * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy + /// must allow the principal that is making the CreateKey request to make + /// a subsequent PutKeyPolicy request on the KMS key. This reduces the risk + /// that the KMS key becomes unmanageable. For more information, refer to + /// the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + /// section of the Key Management Service Developer Guide . + /// + /// + /// * Each statement in the key policy must contain one or more principals. + /// The principals in the key policy must exist and be visible to KMS. When + /// you create a new Amazon Web Services principal (for example, an IAM user + /// or role), you might need to enforce a delay before including the new principal + /// in a key policy because the new principal might not be immediately visible + /// to KMS. For more information, see Changes that I make are not always immediately + /// visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + /// in the Amazon Web Services Identity and Access Management User Guide. 
+ /// + /// + /// A key policy document can include only the following characters: + /// + /// + /// * Printable ASCII characters from the space character (\u0020) through + /// the end of the ASCII character range. + /// + /// + /// * Printable characters in the Basic Latin and Latin-1 Supplement character + /// set (through \u00FF). + /// + /// + /// * The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special + /// characters + /// + /// + /// For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + /// in the Key Management Service Developer Guide. For help writing and formatting + /// a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) + /// in the Identity and Access Management User Guide . #[serde(default, skip_serializing_if = "Option::is_none")] pub policy: Option, - /// Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation. - /// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key Management Service Developer Guide. - /// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) permission in an IAM policy. - /// Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one. - /// When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + /// Assigns one or more tags to the KMS key. Use this parameter to tag the KMS + /// key when it is created. To tag an existing KMS key, use the TagResource operation. + /// + /// + /// Tagging or untagging a KMS key can allow or deny permission to the KMS key. + /// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) + /// in the Key Management Service Developer Guide. + /// + /// + /// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + /// permission in an IAM policy. + /// + /// + /// Each tag consists of a tag key and a tag value. Both the tag key and the + /// tag value are required, but the tag value can be an empty (null) string. + /// You cannot have more than one tag on a KMS key with the same tag key. If + /// you specify an existing tag key with a different tag value, KMS replaces + /// the current tag value with the specified one. + /// + /// + /// When you add tags to an Amazon Web Services resource, Amazon Web Services + /// generates a cost allocation report with usage and costs aggregated by tags. + /// Tags can also be used to control access to a KMS key. For details, see Tagging + /// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). 
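To make the spec fields above concrete, here is a minimal sketch of a Key custom resource as the ACK KMS controller would consume it. The group/version and kind (`kms.services.k8s.aws/v1alpha1`, `Key`) follow the usual ACK conventions, the field names come from the serde renames in the generated struct above, and all values are illustrative only.

apiVersion: kms.services.k8s.aws/v1alpha1
kind: Key
metadata:
  name: example-key
spec:
  multiRegion: true        # creates a primary key; replicas come from ReplicateKey
  origin: AWS_KMS          # default key-material source; EXTERNAL or AWS_CLOUDHSM are the alternatives
  policy: |
    {
      "Version": "2012-10-17",
      "Statement": [{
        "Sid": "EnableRootAccess",
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
        "Action": "kms:*",
        "Resource": "*"
      }]
    }
  tags:
    - tagKey: team
      tagValue: platform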
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty (null) strings. -/// For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) in the Amazon Web Services Billing and Cost Management User Guide. +/// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and +/// tag values are both required, but tag values can be empty (null) strings. +/// +/// +/// For information about the rules that apply to tag keys and tag values, see +/// User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +/// in the Amazon Web Services Billing and Cost Management User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct KeyTags { #[serde(default, skip_serializing_if = "Option::is_none", rename = "tagKey")] @@ -99,83 +282,154 @@ pub struct KeyTags { /// KeyStatus defines the observed state of Key #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct KeyStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// The twelve-digit account ID of the Amazon Web Services account that owns the KMS key. + /// The twelve-digit account ID of the Amazon Web Services account that owns + /// the KMS key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "awsAccountID")] pub aws_account_id: Option, - /// The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), KMS creates the key material for the KMS key in the associated CloudHSM cluster. This value is present only when the KMS key is created in a custom key store. + /// The cluster ID of the CloudHSM cluster that contains the key material for + /// the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), + /// KMS creates the key material for the KMS key in the associated CloudHSM cluster. + /// This value is present only when the KMS key is created in a custom key store. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "cloudHsmClusterID")] pub cloud_hsm_cluster_id: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date and time when the KMS key was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationDate")] pub creation_date: Option, - /// The date and time after which KMS deletes this KMS key. This value is present only when the KMS key is scheduled for deletion, that is, when its KeyState is PendingDeletion. - /// When the primary key in a multi-Region key is scheduled for deletion but still has replica keys, its key state is PendingReplicaDeletion and the length of its waiting period is displayed in the PendingDeletionWindowInDays field. + /// The date and time after which KMS deletes this KMS key. This value is present + /// only when the KMS key is scheduled for deletion, that is, when its KeyState + /// is PendingDeletion. + /// + /// + /// When the primary key in a multi-Region key is scheduled for deletion but + /// still has replica keys, its key state is PendingReplicaDeletion and the length + /// of its waiting period is displayed in the PendingDeletionWindowInDays field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletionDate")] pub deletion_date: Option, - /// Specifies whether the KMS key is enabled. When KeyState is Enabled this value is true, otherwise it is false. + /// Specifies whether the KMS key is enabled. When KeyState is Enabled this value + /// is true, otherwise it is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub enabled: Option, - /// The encryption algorithms that the KMS key supports. You cannot use the KMS key with other encryption algorithms within KMS. - /// This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT. + /// The encryption algorithms that the KMS key supports. You cannot use the KMS + /// key with other encryption algorithms within KMS. + /// + /// + /// This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT. #[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionAlgorithms")] pub encryption_algorithms: Option>, - /// Specifies whether the KMS key's key material expires. This value is present only when Origin is EXTERNAL, otherwise this value is omitted. + /// Specifies whether the KMS key's key material expires. This value is present + /// only when Origin is EXTERNAL, otherwise this value is omitted. #[serde(default, skip_serializing_if = "Option::is_none", rename = "expirationModel")] pub expiration_model: Option, /// The globally unique identifier for the KMS key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyID")] pub key_id: Option, - /// The manager of the KMS key. KMS keys in your Amazon Web Services account are either customer managed or Amazon Web Services managed. 
For more information about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) in the Key Management Service Developer Guide. + /// The manager of the KMS key. KMS keys in your Amazon Web Services account + /// are either customer managed or Amazon Web Services managed. For more information + /// about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) + /// in the Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyManager")] pub key_manager: Option, - /// The current status of the KMS key. - /// For more information about how key state affects the use of a KMS key, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the Key Management Service Developer Guide. + /// The current status of the KMS key. + /// + /// + /// For more information about how key state affects the use of a KMS key, see + /// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + /// in the Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "keyState")] pub key_state: Option, - /// The message authentication code (MAC) algorithm that the HMAC KMS key supports. - /// This value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC. + /// The message authentication code (MAC) algorithm that the HMAC KMS key supports. + /// + /// + /// This value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC. #[serde(default, skip_serializing_if = "Option::is_none", rename = "macAlgorithms")] pub mac_algorithms: Option>, - /// Lists the primary and replica keys in same multi-Region key. This field is present only when the value of the MultiRegion field is True. - /// For more information about any listed KMS key, use the DescribeKey operation. - /// * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. - /// * PrimaryKey displays the key ARN and Region of the primary key. This field displays the current KMS key if it is the primary key. - /// * ReplicaKeys displays the key ARNs and Regions of all replica keys. This field includes the current KMS key if it is a replica key. + /// Lists the primary and replica keys in same multi-Region key. This field is + /// present only when the value of the MultiRegion field is True. + /// + /// + /// For more information about any listed KMS key, use the DescribeKey operation. + /// + /// + /// * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA + /// key. + /// + /// + /// * PrimaryKey displays the key ARN and Region of the primary key. This + /// field displays the current KMS key if it is the primary key. + /// + /// + /// * ReplicaKeys displays the key ARNs and Regions of all replica keys. This + /// field includes the current KMS key if it is a replica key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiRegionConfiguration")] pub multi_region_configuration: Option, - /// The waiting period before the primary key in a multi-Region key is deleted. This waiting period begins when the last of its replica keys is deleted. This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. That indicates that the KMS key is the primary key in a multi-Region key, it is scheduled for deletion, and it still has existing replica keys. 
- /// When a single-Region KMS key or a multi-Region replica key is scheduled for deletion, its deletion date is displayed in the DeletionDate field. However, when the primary key in a multi-Region key is scheduled for deletion, its waiting period doesn't begin until all of its replica keys are deleted. This value displays that waiting period. When the last replica key in the multi-Region key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion to PendingDeletion and the deletion date appears in the DeletionDate field. + /// The waiting period before the primary key in a multi-Region key is deleted. + /// This waiting period begins when the last of its replica keys is deleted. + /// This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. + /// That indicates that the KMS key is the primary key in a multi-Region key, + /// it is scheduled for deletion, and it still has existing replica keys. + /// + /// + /// When a single-Region KMS key or a multi-Region replica key is scheduled for + /// deletion, its deletion date is displayed in the DeletionDate field. However, + /// when the primary key in a multi-Region key is scheduled for deletion, its + /// waiting period doesn't begin until all of its replica keys are deleted. This + /// value displays that waiting period. When the last replica key in the multi-Region + /// key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion + /// to PendingDeletion and the deletion date appears in the DeletionDate field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingDeletionWindowInDays")] pub pending_deletion_window_in_days: Option, - /// The signing algorithms that the KMS key supports. You cannot use the KMS key with other signing algorithms within KMS. - /// This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY. + /// The signing algorithms that the KMS key supports. You cannot use the KMS + /// key with other signing algorithms within KMS. + /// + /// + /// This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY. #[serde(default, skip_serializing_if = "Option::is_none", rename = "signingAlgorithms")] pub signing_algorithms: Option>, - /// The time at which the imported key material expires. When the key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. + /// The time at which the imported key material expires. When the key material + /// expires, KMS deletes the key material and the KMS key becomes unusable. This + /// value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel + /// is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. #[serde(default, skip_serializing_if = "Option::is_none", rename = "validTo")] pub valid_to: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct KeyStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct KeyStatusConditions { /// Last time the condition transitioned from one status to another. @@ -194,11 +448,23 @@ pub struct KeyStatusConditions { pub r#type: String, } -/// Lists the primary and replica keys in same multi-Region key. This field is present only when the value of the MultiRegion field is True. -/// For more information about any listed KMS key, use the DescribeKey operation. -/// * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. -/// * PrimaryKey displays the key ARN and Region of the primary key. This field displays the current KMS key if it is the primary key. -/// * ReplicaKeys displays the key ARNs and Regions of all replica keys. This field includes the current KMS key if it is a replica key. +/// Lists the primary and replica keys in same multi-Region key. This field is +/// present only when the value of the MultiRegion field is True. +/// +/// +/// For more information about any listed KMS key, use the DescribeKey operation. +/// +/// +/// * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA +/// key. +/// +/// +/// * PrimaryKey displays the key ARN and Region of the primary key. This +/// field displays the current KMS key if it is the primary key. +/// +/// +/// * ReplicaKeys displays the key ARNs and Regions of all replica keys. This +/// field includes the current KMS key if it is a replica key. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct KeyStatusMultiRegionConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiRegionKeyType")] diff --git a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/admissionchecks.rs b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/admissionchecks.rs index 090500137..0ec3fb8f6 100644 --- a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/admissionchecks.rs +++ b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/admissionchecks.rs @@ -11,13 +11,18 @@ use serde::{Serialize, Deserialize}; #[kube(status = "AdmissionCheckStatus")] #[kube(schema = "disabled")] pub struct AdmissionCheckSpec { - /// controllerName is name of the controller which will actually perform the checks. This is the name with which controller identifies with, not necessarily a K8S Pod or Deployment name. Cannot be empty. + /// controllerName is name of the controller which will actually perform + /// the checks. This is the name with which controller identifies with, + /// not necessarily a K8S Pod or Deployment name. Cannot be empty. #[serde(rename = "controllerName")] pub controller_name: String, /// Parameters identifies the resource providing additional check parameters. #[serde(default, skip_serializing_if = "Option::is_none")] pub parameters: Option, - /// RetryDelayMinutes specifies how long to keep the workload suspended after a failed check (after it transitioned to False). After that the check state goes to "Unknown". The default is 15 min. + /// RetryDelayMinutes specifies how long to keep the workload suspended + /// after a failed check (after it transitioned to False). + /// After that the check state goes to "Unknown". + /// The default is 15 min. #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryDelayMinutes")] pub retry_delay_minutes: Option, } @@ -37,36 +42,77 @@ pub struct AdmissionCheckParameters { /// AdmissionCheckStatus defines the observed state of AdmissionCheck #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct AdmissionCheckStatus { - /// conditions hold the latest available observations of the AdmissionCheck current state. + /// conditions hold the latest available observations of the AdmissionCheck + /// current state. #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. 
+/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct AdmissionCheckStatusConditions { - /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + /// lastTransitionTime is the last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. #[serde(rename = "lastTransitionTime")] pub last_transition_time: String, - /// message is a human readable message indicating details about the transition. This may be an empty string. + /// message is a human readable message indicating details about the transition. + /// This may be an empty string. pub message: String, - /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + /// observedGeneration represents the .metadata.generation that the condition was set based upon. + /// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + /// with respect to the current state of the instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] pub observed_generation: Option, - /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. + /// Producers of specific condition types may define expected values and meanings for this field, + /// and whether the values are considered a guaranteed API. + /// The value should be a CamelCase string. + /// This field may not be empty. pub reason: String, /// status of the condition, one of True, False, Unknown. pub status: AdmissionCheckStatusConditionsStatus, - /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + /// type of condition in CamelCase or in foo.example.com/CamelCase. + /// --- + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + /// useful (see .node.status.conditions), the ability to deconflict is important. 
+ /// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) #[serde(rename = "type")] pub r#type: String, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum AdmissionCheckStatusConditionsStatus { True, diff --git a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/clusterqueues.rs b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/clusterqueues.rs index c3aba5cc1..f34a13f41 100644 --- a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/clusterqueues.rs +++ b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/clusterqueues.rs @@ -16,129 +16,267 @@ pub struct ClusterQueueSpec { /// admissionChecks lists the AdmissionChecks required by this ClusterQueue #[serde(default, skip_serializing_if = "Option::is_none", rename = "admissionChecks")] pub admission_checks: Option>, - /// cohort that this ClusterQueue belongs to. CQs that belong to the same cohort can borrow unused resources from each other. - /// A CQ can be a member of a single borrowing cohort. A workload submitted to a queue referencing this CQ can borrow quota from any CQ in the cohort. Only quota for the [resource, flavor] pairs listed in the CQ can be borrowed. If empty, this ClusterQueue cannot borrow from any other ClusterQueue and vice versa. - /// A cohort is a name that links CQs together, but it doesn't reference any object. - /// Validation of a cohort name is equivalent to that of object names: subdomain in DNS (RFC 1123). + /// cohort that this ClusterQueue belongs to. CQs that belong to the + /// same cohort can borrow unused resources from each other. + /// + /// + /// A CQ can be a member of a single borrowing cohort. A workload submitted + /// to a queue referencing this CQ can borrow quota from any CQ in the cohort. + /// Only quota for the [resource, flavor] pairs listed in the CQ can be + /// borrowed. + /// If empty, this ClusterQueue cannot borrow from any other ClusterQueue and + /// vice versa. + /// + /// + /// A cohort is a name that links CQs together, but it doesn't reference any + /// object. + /// + /// + /// Validation of a cohort name is equivalent to that of object names: + /// subdomain in DNS (RFC 1123). 
#[serde(default, skip_serializing_if = "Option::is_none")] pub cohort: Option, - /// flavorFungibility defines whether a workload should try the next flavor before borrowing or preempting in the flavor being evaluated. + /// flavorFungibility defines whether a workload should try the next flavor + /// before borrowing or preempting in the flavor being evaluated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flavorFungibility")] pub flavor_fungibility: Option, - /// namespaceSelector defines which namespaces are allowed to submit workloads to this clusterQueue. Beyond this basic support for policy, an policy agent like Gatekeeper should be used to enforce more advanced policies. Defaults to null which is a nothing selector (no namespaces eligible). If set to an empty selector `{}`, then all namespaces are eligible. + /// namespaceSelector defines which namespaces are allowed to submit workloads to + /// this clusterQueue. Beyond this basic support for policy, a policy agent like + /// Gatekeeper should be used to enforce more advanced policies. + /// Defaults to null which is a nothing selector (no namespaces eligible). + /// If set to an empty selector `{}`, then all namespaces are eligible. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// preemption describes policies to preempt Workloads from this ClusterQueue or the ClusterQueue's cohort. - /// Preemption can happen in two scenarios: - /// - When a Workload fits within the nominal quota of the ClusterQueue, but the quota is currently borrowed by other ClusterQueues in the cohort. Preempting Workloads in other ClusterQueues allows this ClusterQueue to reclaim its nominal quota. - When a Workload doesn't fit within the nominal quota of the ClusterQueue and there are admitted Workloads in the ClusterQueue with lower priority. - /// The preemption algorithm tries to find a minimal set of Workloads to preempt to accomomdate the pending Workload, preempting Workloads with lower priority first. + /// preemption describes policies to preempt Workloads from this ClusterQueue + /// or the ClusterQueue's cohort. + /// + /// + /// Preemption can happen in two scenarios: + /// + /// + /// - When a Workload fits within the nominal quota of the ClusterQueue, but + /// the quota is currently borrowed by other ClusterQueues in the cohort. + /// Preempting Workloads in other ClusterQueues allows this ClusterQueue to + /// reclaim its nominal quota. + /// - When a Workload doesn't fit within the nominal quota of the ClusterQueue + /// and there are admitted Workloads in the ClusterQueue with lower priority. + /// + /// + /// The preemption algorithm tries to find a minimal set of Workloads to + /// preempt to accomomdate the pending Workload, preempting Workloads with + /// lower priority first. #[serde(default, skip_serializing_if = "Option::is_none")] pub preemption: Option, - /// QueueingStrategy indicates the queueing strategy of the workloads across the queues in this ClusterQueue. This field is immutable. Current Supported Strategies: - /// - StrictFIFO: workloads are ordered strictly by creation time. Older workloads that can't be admitted will block admitting newer workloads even if they fit available quota. - BestEffortFIFO: workloads are ordered by creation time, however older workloads that can't be admitted will not block admitting newer workloads that fit existing quota. 
+ /// QueueingStrategy indicates the queueing strategy of the workloads + /// across the queues in this ClusterQueue. This field is immutable. + /// Current Supported Strategies: + /// + /// + /// - StrictFIFO: workloads are ordered strictly by creation time. + /// Older workloads that can't be admitted will block admitting newer + /// workloads even if they fit available quota. + /// - BestEffortFIFO: workloads are ordered by creation time, + /// however older workloads that can't be admitted will not block + /// admitting newer workloads that fit existing quota. #[serde(default, skip_serializing_if = "Option::is_none", rename = "queueingStrategy")] pub queueing_strategy: Option, - /// resourceGroups describes groups of resources. Each resource group defines the list of resources and a list of flavors that provide quotas for these resources. Each resource and each flavor can only form part of one resource group. resourceGroups can be up to 16. + /// resourceGroups describes groups of resources. + /// Each resource group defines the list of resources and a list of flavors + /// that provide quotas for these resources. + /// Each resource and each flavor can only form part of one resource group. + /// resourceGroups can be up to 16. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceGroups")] pub resource_groups: Option>, - /// stopPolicy - if set to a value different than None, the ClusterQueue is considered Inactive, no new reservation being made. - /// Depending on its value, its associated workloads will: - /// - None - Workloads are admitted - HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation. - Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation. + /// stopPolicy - if set to a value different from None, the ClusterQueue is considered Inactive, no new reservation being + /// made. + /// + /// + /// Depending on its value, its associated workloads will: + /// + /// + /// - None - Workloads are admitted + /// - HoldAndDrain - Admitted workloads are evicted and Reserving workloads will cancel the reservation. + /// - Hold - Admitted workloads will run to completion and Reserving workloads will cancel the reservation. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stopPolicy")] pub stop_policy: Option, } -/// flavorFungibility defines whether a workload should try the next flavor before borrowing or preempting in the flavor being evaluated. +/// flavorFungibility defines whether a workload should try the next flavor +/// before borrowing or preempting in the flavor being evaluated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueFlavorFungibility { - /// whenCanBorrow determines whether a workload should try the next flavor before borrowing in current flavor. The possible values are: - /// - `Borrow` (default): allocate in current flavor if borrowing is possible. - `TryNextFlavor`: try next flavor even if the current flavor has enough resources to borrow. + /// whenCanBorrow determines whether a workload should try the next flavor + /// before borrowing in current flavor. The possible values are: + /// + /// + /// - `Borrow` (default): allocate in current flavor if borrowing + /// is possible. + /// - `TryNextFlavor`: try next flavor even if the current + /// flavor has enough resources to borrow. 
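A short sketch of how the top-level ClusterQueue fields documented here map onto a manifest. The apiVersion/kind are `kueue.x-k8s.io/v1beta1` / `ClusterQueue`, field names mirror the serde renames above, and the values are illustrative.

apiVersion: kueue.x-k8s.io/v1beta1
kind: ClusterQueue
metadata:
  name: team-a-cq
spec:
  cohort: shared-pool              # lets this CQ borrow from and lend to other CQs in "shared-pool"
  queueingStrategy: BestEffortFIFO
  namespaceSelector: {}            # empty selector: every namespace may submit workloads
  flavorFungibility:
    whenCanBorrow: Borrow
    whenCanPreempt: TryNextFlavor
  stopPolicy: None
  # preemption and resourceGroups are sketched further below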
#[serde(default, skip_serializing_if = "Option::is_none", rename = "whenCanBorrow")] pub when_can_borrow: Option, - /// whenCanPreempt determines whether a workload should try the next flavor before borrowing in current flavor. The possible values are: - /// - `Preempt`: allocate in current flavor if it's possible to preempt some workloads. - `TryNextFlavor` (default): try next flavor even if there are enough candidates for preemption in the current flavor. + /// whenCanPreempt determines whether a workload should try the next flavor + /// before borrowing in current flavor. The possible values are: + /// + /// + /// - `Preempt`: allocate in current flavor if it's possible to preempt some workloads. + /// - `TryNextFlavor` (default): try next flavor even if there are enough + /// candidates for preemption in the current flavor. #[serde(default, skip_serializing_if = "Option::is_none", rename = "whenCanPreempt")] pub when_can_preempt: Option, } -/// flavorFungibility defines whether a workload should try the next flavor before borrowing or preempting in the flavor being evaluated. +/// flavorFungibility defines whether a workload should try the next flavor +/// before borrowing or preempting in the flavor being evaluated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueueFlavorFungibilityWhenCanBorrow { Borrow, TryNextFlavor, } -/// flavorFungibility defines whether a workload should try the next flavor before borrowing or preempting in the flavor being evaluated. +/// flavorFungibility defines whether a workload should try the next flavor +/// before borrowing or preempting in the flavor being evaluated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueueFlavorFungibilityWhenCanPreempt { Preempt, TryNextFlavor, } -/// namespaceSelector defines which namespaces are allowed to submit workloads to this clusterQueue. Beyond this basic support for policy, an policy agent like Gatekeeper should be used to enforce more advanced policies. Defaults to null which is a nothing selector (no namespaces eligible). If set to an empty selector `{}`, then all namespaces are eligible. +/// namespaceSelector defines which namespaces are allowed to submit workloads to +/// this clusterQueue. Beyond this basic support for policy, a policy agent like +/// Gatekeeper should be used to enforce more advanced policies. +/// Defaults to null which is a nothing selector (no namespaces eligible). +/// If set to an empty selector `{}`, then all namespaces are eligible. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// preemption describes policies to preempt Workloads from this ClusterQueue or the ClusterQueue's cohort. -/// Preemption can happen in two scenarios: -/// - When a Workload fits within the nominal quota of the ClusterQueue, but the quota is currently borrowed by other ClusterQueues in the cohort. Preempting Workloads in other ClusterQueues allows this ClusterQueue to reclaim its nominal quota. - When a Workload doesn't fit within the nominal quota of the ClusterQueue and there are admitted Workloads in the ClusterQueue with lower priority. -/// The preemption algorithm tries to find a minimal set of Workloads to preempt to accomomdate the pending Workload, preempting Workloads with lower priority first. +/// preemption describes policies to preempt Workloads from this ClusterQueue +/// or the ClusterQueue's cohort. +/// +/// +/// Preemption can happen in two scenarios: +/// +/// +/// - When a Workload fits within the nominal quota of the ClusterQueue, but +/// the quota is currently borrowed by other ClusterQueues in the cohort. +/// Preempting Workloads in other ClusterQueues allows this ClusterQueue to +/// reclaim its nominal quota. +/// - When a Workload doesn't fit within the nominal quota of the ClusterQueue +/// and there are admitted Workloads in the ClusterQueue with lower priority. +/// +/// +/// The preemption algorithm tries to find a minimal set of Workloads to +/// preempt to accomomdate the pending Workload, preempting Workloads with +/// lower priority first. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueuePreemption { - /// borrowWithinCohort provides configuration to allow preemption within cohort while borrowing. + /// borrowWithinCohort provides configuration to allow preemption within + /// cohort while borrowing. #[serde(default, skip_serializing_if = "Option::is_none", rename = "borrowWithinCohort")] pub borrow_within_cohort: Option, - /// reclaimWithinCohort determines whether a pending Workload can preempt Workloads from other ClusterQueues in the cohort that are using more than their nominal quota. 
The possible values are: - /// - `Never` (default): do not preempt Workloads in the cohort. - `LowerPriority`: if the pending Workload fits within the nominal quota of its ClusterQueue, only preempt Workloads in the cohort that have lower priority than the pending Workload. - `Any`: if the pending Workload fits within the nominal quota of its ClusterQueue, preempt any Workload in the cohort, irrespective of priority. + /// reclaimWithinCohort determines whether a pending Workload can preempt + /// Workloads from other ClusterQueues in the cohort that are using more than + /// their nominal quota. The possible values are: + /// + /// + /// - `Never` (default): do not preempt Workloads in the cohort. + /// - `LowerPriority`: if the pending Workload fits within the nominal + /// quota of its ClusterQueue, only preempt Workloads in the cohort that have + /// lower priority than the pending Workload. + /// - `Any`: if the pending Workload fits within the nominal quota of its + /// ClusterQueue, preempt any Workload in the cohort, irrespective of + /// priority. #[serde(default, skip_serializing_if = "Option::is_none", rename = "reclaimWithinCohort")] pub reclaim_within_cohort: Option, - /// withinClusterQueue determines whether a pending Workload that doesn't fit within the nominal quota for its ClusterQueue, can preempt active Workloads in the ClusterQueue. The possible values are: - /// - `Never` (default): do not preempt Workloads in the ClusterQueue. - `LowerPriority`: only preempt Workloads in the ClusterQueue that have lower priority than the pending Workload. - `LowerOrNewerEqualPriority`: only preempt Workloads in the ClusterQueue that either have a lower priority than the pending workload or equal priority and are newer than the pending workload. + /// withinClusterQueue determines whether a pending Workload that doesn't fit + /// within the nominal quota for its ClusterQueue, can preempt active Workloads in + /// the ClusterQueue. The possible values are: + /// + /// + /// - `Never` (default): do not preempt Workloads in the ClusterQueue. + /// - `LowerPriority`: only preempt Workloads in the ClusterQueue that have + /// lower priority than the pending Workload. + /// - `LowerOrNewerEqualPriority`: only preempt Workloads in the ClusterQueue that + /// either have a lower priority than the pending workload or equal priority + /// and are newer than the pending workload. #[serde(default, skip_serializing_if = "Option::is_none", rename = "withinClusterQueue")] pub within_cluster_queue: Option, } -/// borrowWithinCohort provides configuration to allow preemption within cohort while borrowing. +/// borrowWithinCohort provides configuration to allow preemption within +/// cohort while borrowing. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueuePreemptionBorrowWithinCohort { - /// maxPriorityThreshold allows to restrict the set of workloads which might be preempted by a borrowing workload, to only workloads with priority less than or equal to the specified threshold priority. When the threshold is not specified, then any workload satisfying the policy can be preempted by the borrowing workload. + /// maxPriorityThreshold allows to restrict the set of workloads which + /// might be preempted by a borrowing workload, to only workloads with + /// priority less than or equal to the specified threshold priority. + /// When the threshold is not specified, then any workload satisfying the + /// policy can be preempted by the borrowing workload. 
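Continuing the ClusterQueue sketch above, a preemption stanza using the enum values documented here might look as follows; the threshold is only an example.

  preemption:
    reclaimWithinCohort: LowerPriority   # reclaim nominal quota currently borrowed by other CQs in the cohort
    borrowWithinCohort:
      policy: LowerPriority
      maxPriorityThreshold: 100          # only workloads at or below this priority may be preempted while borrowing
    withinClusterQueue: LowerPriority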
#[serde(default, skip_serializing_if = "Option::is_none", rename = "maxPriorityThreshold")] pub max_priority_threshold: Option, - /// policy determines the policy for preemption to reclaim quota within cohort while borrowing. Possible values are: - `Never` (default): do not allow for preemption, in other ClusterQueues within the cohort, for a borrowing workload. - `LowerPriority`: allow preemption, in other ClusterQueues within the cohort, for a borrowing workload, but only if the preempted workloads are of lower priority. + /// policy determines the policy for preemption to reclaim quota within cohort while borrowing. + /// Possible values are: + /// - `Never` (default): do not allow for preemption, in other + /// ClusterQueues within the cohort, for a borrowing workload. + /// - `LowerPriority`: allow preemption, in other ClusterQueues + /// within the cohort, for a borrowing workload, but only if + /// the preempted workloads are of lower priority. #[serde(default, skip_serializing_if = "Option::is_none")] pub policy: Option, } -/// borrowWithinCohort provides configuration to allow preemption within cohort while borrowing. +/// borrowWithinCohort provides configuration to allow preemption within +/// cohort while borrowing. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueuePreemptionBorrowWithinCohortPolicy { Never, LowerPriority, } -/// preemption describes policies to preempt Workloads from this ClusterQueue or the ClusterQueue's cohort. -/// Preemption can happen in two scenarios: -/// - When a Workload fits within the nominal quota of the ClusterQueue, but the quota is currently borrowed by other ClusterQueues in the cohort. Preempting Workloads in other ClusterQueues allows this ClusterQueue to reclaim its nominal quota. - When a Workload doesn't fit within the nominal quota of the ClusterQueue and there are admitted Workloads in the ClusterQueue with lower priority. -/// The preemption algorithm tries to find a minimal set of Workloads to preempt to accomomdate the pending Workload, preempting Workloads with lower priority first. +/// preemption describes policies to preempt Workloads from this ClusterQueue +/// or the ClusterQueue's cohort. +/// +/// +/// Preemption can happen in two scenarios: +/// +/// +/// - When a Workload fits within the nominal quota of the ClusterQueue, but +/// the quota is currently borrowed by other ClusterQueues in the cohort. +/// Preempting Workloads in other ClusterQueues allows this ClusterQueue to +/// reclaim its nominal quota. +/// - When a Workload doesn't fit within the nominal quota of the ClusterQueue +/// and there are admitted Workloads in the ClusterQueue with lower priority. +/// +/// +/// The preemption algorithm tries to find a minimal set of Workloads to +/// preempt to accomomdate the pending Workload, preempting Workloads with +/// lower priority first. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueuePreemptionReclaimWithinCohort { Never, @@ -146,10 +284,24 @@ pub enum ClusterQueuePreemptionReclaimWithinCohort { Any, } -/// preemption describes policies to preempt Workloads from this ClusterQueue or the ClusterQueue's cohort. -/// Preemption can happen in two scenarios: -/// - When a Workload fits within the nominal quota of the ClusterQueue, but the quota is currently borrowed by other ClusterQueues in the cohort. Preempting Workloads in other ClusterQueues allows this ClusterQueue to reclaim its nominal quota. 
- When a Workload doesn't fit within the nominal quota of the ClusterQueue and there are admitted Workloads in the ClusterQueue with lower priority. -/// The preemption algorithm tries to find a minimal set of Workloads to preempt to accomomdate the pending Workload, preempting Workloads with lower priority first. +/// preemption describes policies to preempt Workloads from this ClusterQueue +/// or the ClusterQueue's cohort. +/// +/// +/// Preemption can happen in two scenarios: +/// +/// +/// - When a Workload fits within the nominal quota of the ClusterQueue, but +/// the quota is currently borrowed by other ClusterQueues in the cohort. +/// Preempting Workloads in other ClusterQueues allows this ClusterQueue to +/// reclaim its nominal quota. +/// - When a Workload doesn't fit within the nominal quota of the ClusterQueue +/// and there are admitted Workloads in the ClusterQueue with lower priority. +/// +/// +/// The preemption algorithm tries to find a minimal set of Workloads to +/// preempt to accomomdate the pending Workload, preempting Workloads with +/// lower priority first. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueuePreemptionWithinClusterQueue { Never, @@ -168,30 +320,73 @@ pub enum ClusterQueueQueueingStrategy { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueResourceGroups { - /// coveredResources is the list of resources covered by the flavors in this group. Examples: cpu, memory, vendor.com/gpu. The list cannot be empty and it can contain up to 16 resources. + /// coveredResources is the list of resources covered by the flavors in this + /// group. + /// Examples: cpu, memory, vendor.com/gpu. + /// The list cannot be empty and it can contain up to 16 resources. #[serde(rename = "coveredResources")] pub covered_resources: Vec, - /// flavors is the list of flavors that provide the resources of this group. Typically, different flavors represent different hardware models (e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot cpus). Each flavor MUST list all the resources listed for this group in the same order as the .resources field. The list cannot be empty and it can contain up to 16 flavors. + /// flavors is the list of flavors that provide the resources of this group. + /// Typically, different flavors represent different hardware models + /// (e.g., gpu models, cpu architectures) or pricing models (on-demand vs spot + /// cpus). + /// Each flavor MUST list all the resources listed for this group in the same + /// order as the .resources field. + /// The list cannot be empty and it can contain up to 16 flavors. pub flavors: Vec, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueResourceGroupsFlavors { - /// name of this flavor. The name should match the .metadata.name of a ResourceFlavor. If a matching ResourceFlavor does not exist, the ClusterQueue will have an Active condition set to False. + /// name of this flavor. The name should match the .metadata.name of a + /// ResourceFlavor. If a matching ResourceFlavor does not exist, the + /// ClusterQueue will have an Active condition set to False. pub name: String, - /// resources is the list of quotas for this flavor per resource. There could be up to 16 resources. + /// resources is the list of quotas for this flavor per resource. + /// There could be up to 16 resources. 
pub resources: Vec, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueResourceGroupsFlavorsResources { - /// borrowingLimit is the maximum amount of quota for the [flavor, resource] combination that this ClusterQueue is allowed to borrow from the unused quota of other ClusterQueues in the same cohort. In total, at a given time, Workloads in a ClusterQueue can consume a quantity of quota equal to nominalQuota+borrowingLimit, assuming the other ClusterQueues in the cohort have enough unused quota. If null, it means that there is no borrowing limit. If not null, it must be non-negative. borrowingLimit must be null if spec.cohort is empty. + /// borrowingLimit is the maximum amount of quota for the [flavor, resource] + /// combination that this ClusterQueue is allowed to borrow from the unused + /// quota of other ClusterQueues in the same cohort. + /// In total, at a given time, Workloads in a ClusterQueue can consume a + /// quantity of quota equal to nominalQuota+borrowingLimit, assuming the other + /// ClusterQueues in the cohort have enough unused quota. + /// If null, it means that there is no borrowing limit. + /// If not null, it must be non-negative. + /// borrowingLimit must be null if spec.cohort is empty. #[serde(default, skip_serializing_if = "Option::is_none", rename = "borrowingLimit")] pub borrowing_limit: Option, + /// lendingLimit is the maximum amount of unused quota for the [flavor, resource] + /// combination that this ClusterQueue can lend to other ClusterQueues in the same cohort. + /// In total, at a given time, ClusterQueue reserves for its exclusive use + /// a quantity of quota equals to nominalQuota - lendingLimit. + /// If null, it means that there is no lending limit, meaning that + /// all the nominalQuota can be borrowed by other clusterQueues in the cohort. + /// If not null, it must be non-negative. + /// lendingLimit must be null if spec.cohort is empty. + /// This field is in alpha stage. To be able to use this field, + /// enable the feature gate LendingLimit, which is disabled by default. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "lendingLimit")] + pub lending_limit: Option, /// name of this resource. pub name: String, - /// nominalQuota is the quantity of this resource that is available for Workloads admitted by this ClusterQueue at a point in time. The nominalQuota must be non-negative. nominalQuota should represent the resources in the cluster available for running jobs (after discounting resources consumed by system components and pods not managed by kueue). In an autoscaled cluster, nominalQuota should account for resources that can be provided by a component such as Kubernetes cluster-autoscaler. - /// If the ClusterQueue belongs to a cohort, the sum of the quotas for each (flavor, resource) combination defines the maximum quantity that can be allocated by a ClusterQueue in the cohort. + /// nominalQuota is the quantity of this resource that is available for + /// Workloads admitted by this ClusterQueue at a point in time. + /// The nominalQuota must be non-negative. + /// nominalQuota should represent the resources in the cluster available for + /// running jobs (after discounting resources consumed by system components + /// and pods not managed by kueue). In an autoscaled cluster, nominalQuota + /// should account for resources that can be provided by a component such as + /// Kubernetes cluster-autoscaler. 
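And the quota side of the same sketch: one resource group with a single flavor, using nominalQuota, borrowingLimit, and the alpha lendingLimit field described above. Flavor and resource names are placeholders, and lendingLimit assumes the LendingLimit feature gate is enabled.

  resourceGroups:
    - coveredResources: ["cpu", "memory"]
      flavors:
        - name: default-flavor           # must match the .metadata.name of a ResourceFlavor
          resources:
            - name: cpu
              nominalQuota: 32
              borrowingLimit: 16         # valid only when spec.cohort is set
              lendingLimit: 24           # alpha; requires the LendingLimit feature gate
            - name: memory
              nominalQuota: 128Gi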
+ /// + /// + /// If the ClusterQueue belongs to a cohort, the sum of the quotas for each + /// (flavor, resource) combination defines the maximum quantity that can be + /// allocated by a ClusterQueue in the cohort. #[serde(rename = "nominalQuota")] pub nominal_quota: IntOrString, } @@ -207,54 +402,101 @@ pub enum ClusterQueueStopPolicy { /// ClusterQueueStatus defines the observed state of ClusterQueue #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatus { - /// admittedWorkloads is the number of workloads currently admitted to this clusterQueue and haven't finished yet. + /// admittedWorkloads is the number of workloads currently admitted to this + /// clusterQueue and haven't finished yet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "admittedWorkloads")] pub admitted_workloads: Option, - /// conditions hold the latest available observations of the ClusterQueue current state. + /// conditions hold the latest available observations of the ClusterQueue + /// current state. #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// flavorsReservation are the reserved quotas, by flavor, currently in use by the workloads assigned to this ClusterQueue. + /// flavorsReservation are the reserved quotas, by flavor, currently in use by the + /// workloads assigned to this ClusterQueue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flavorsReservation")] pub flavors_reservation: Option>, - /// flavorsUsage are the used quotas, by flavor, currently in use by the workloads admitted in this ClusterQueue. + /// flavorsUsage are the used quotas, by flavor, currently in use by the + /// workloads admitted in this ClusterQueue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flavorsUsage")] pub flavors_usage: Option>, - /// pendingWorkloads is the number of workloads currently waiting to be admitted to this clusterQueue. + /// pendingWorkloads is the number of workloads currently waiting to be + /// admitted to this clusterQueue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingWorkloads")] pub pending_workloads: Option, - /// PendingWorkloadsStatus contains the information exposed about the current status of the pending workloads in the cluster queue. + /// PendingWorkloadsStatus contains the information exposed about the current + /// status of the pending workloads in the cluster queue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingWorkloadsStatus")] pub pending_workloads_status: Option, - /// reservingWorkloads is the number of workloads currently reserving quota in this clusterQueue. + /// reservingWorkloads is the number of workloads currently reserving quota in this + /// clusterQueue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "reservingWorkloads")] pub reserving_workloads: Option, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatusConditions { - /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + /// lastTransitionTime is the last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. #[serde(rename = "lastTransitionTime")] pub last_transition_time: String, - /// message is a human readable message indicating details about the transition. This may be an empty string. + /// message is a human readable message indicating details about the transition. + /// This may be an empty string. pub message: String, - /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + /// observedGeneration represents the .metadata.generation that the condition was set based upon. + /// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + /// with respect to the current state of the instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] pub observed_generation: Option, - /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. + /// Producers of specific condition types may define expected values and meanings for this field, + /// and whether the values are considered a guaranteed API. + /// The value should be a CamelCase string. + /// This field may not be empty. pub reason: String, /// status of the condition, one of True, False, Unknown. pub status: ClusterQueueStatusConditionsStatus, - /// type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + /// type of condition in CamelCase or in foo.example.com/CamelCase. + /// --- + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + /// useful (see .node.status.conditions), the ability to deconflict is important. + /// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) #[serde(rename = "type")] pub r#type: String, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ClusterQueueStatusConditionsStatus { True, @@ -272,12 +514,14 @@ pub struct ClusterQueueStatusFlavorsReservation { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatusFlavorsReservationResources { - /// Borrowed is quantity of quota that is borrowed from the cohort. In other words, it's the used quota that is over the nominalQuota. + /// Borrowed is quantity of quota that is borrowed from the cohort. In other + /// words, it's the used quota that is over the nominalQuota. #[serde(default, skip_serializing_if = "Option::is_none")] pub borrowed: Option, /// name of the resource pub name: String, - /// total is the total quantity of used quota, including the amount borrowed from the cohort. + /// total is the total quantity of used quota, including the amount borrowed + /// from the cohort. #[serde(default, skip_serializing_if = "Option::is_none")] pub total: Option, } @@ -292,17 +536,20 @@ pub struct ClusterQueueStatusFlavorsUsage { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatusFlavorsUsageResources { - /// Borrowed is quantity of quota that is borrowed from the cohort. In other words, it's the used quota that is over the nominalQuota. + /// Borrowed is quantity of quota that is borrowed from the cohort. In other + /// words, it's the used quota that is over the nominalQuota. 
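A small consumer-side sketch of the status types above: it checks whether the ClusterQueue currently reports an Active condition set to True, the condition that the flavor name documentation earlier in this file refers to. It assumes the stripped field type is Option<Vec<ClusterQueueStatusConditions>>; the condition type name "Active" is taken from that documentation, not from this struct.

fn cluster_queue_is_active(status: &ClusterQueueStatus) -> bool {
    status
        .conditions
        .as_deref()
        .unwrap_or_default()
        .iter()
        .any(|condition| {
            condition.r#type == "Active"
                && matches!(condition.status, ClusterQueueStatusConditionsStatus::True)
        })
}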
#[serde(default, skip_serializing_if = "Option::is_none")] pub borrowed: Option, /// name of the resource pub name: String, - /// total is the total quantity of used quota, including the amount borrowed from the cohort. + /// total is the total quantity of used quota, including the amount borrowed + /// from the cohort. #[serde(default, skip_serializing_if = "Option::is_none")] pub total: Option, } -/// PendingWorkloadsStatus contains the information exposed about the current status of the pending workloads in the cluster queue. +/// PendingWorkloadsStatus contains the information exposed about the current +/// status of the pending workloads in the cluster queue. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatusPendingWorkloadsStatus { /// Head contains the list of top pending workloads. @@ -313,7 +560,8 @@ pub struct ClusterQueueStatusPendingWorkloadsStatus { pub last_change_time: String, } -/// ClusterQueuePendingWorkload contains the information identifying a pending workload in the cluster queue. +/// ClusterQueuePendingWorkload contains the information identifying a pending workload +/// in the cluster queue. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ClusterQueueStatusPendingWorkloadsStatusClusterQueuePendingWorkload { /// Name indicates the name of the pending workload. diff --git a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/localqueues.rs b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/localqueues.rs index c139b3a7b..80470e2a5 100644 --- a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/localqueues.rs +++ b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/localqueues.rs @@ -21,51 +21,96 @@ pub struct LocalQueueSpec { /// LocalQueueStatus defines the observed state of LocalQueue #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct LocalQueueStatus { - /// admittedWorkloads is the number of workloads in this LocalQueue admitted to a ClusterQueue and that haven't finished yet. + /// admittedWorkloads is the number of workloads in this LocalQueue + /// admitted to a ClusterQueue and that haven't finished yet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "admittedWorkloads")] pub admitted_workloads: Option, - /// Conditions hold the latest available observations of the LocalQueue current state. + /// Conditions hold the latest available observations of the LocalQueue + /// current state. #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// flavorsUsage are the used quotas, by flavor currently in use by the workloads assigned to this LocalQueue. + /// flavorsUsage are the used quotas, by flavor currently in use by the + /// workloads assigned to this LocalQueue. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flavorUsage")] pub flavor_usage: Option>, - /// flavorsReservation are the reserved quotas, by flavor currently in use by the workloads assigned to this LocalQueue. + /// flavorsReservation are the reserved quotas, by flavor currently in use by the + /// workloads assigned to this LocalQueue. 
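Given the borrowed/total split documented above, a hedged sketch of how a caller might list the per-resource usage entries that are over their nominalQuota, i.e. the ones reporting a borrowed quantity. Only the presence of borrowed is inspected, so nothing is assumed about its stripped inner type.

fn resources_borrowing_from_cohort(
    resources: &[ClusterQueueStatusFlavorsUsageResources],
) -> Vec<&ClusterQueueStatusFlavorsUsageResources> {
    resources
        .iter()
        .filter(|resource| resource.borrowed.is_some())
        .collect()
}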
#[serde(default, skip_serializing_if = "Option::is_none", rename = "flavorsReservation")] pub flavors_reservation: Option>, /// PendingWorkloads is the number of Workloads in the LocalQueue not yet admitted to a ClusterQueue #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingWorkloads")] pub pending_workloads: Option, - /// reservingWorkloads is the number of workloads in this LocalQueue reserving quota in a ClusterQueue and that haven't finished yet. + /// reservingWorkloads is the number of workloads in this LocalQueue + /// reserving quota in a ClusterQueue and that haven't finished yet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "reservingWorkloads")] pub reserving_workloads: Option, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. +/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct LocalQueueStatusConditions { - /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + /// lastTransitionTime is the last time the condition transitioned from one status to another. + /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. #[serde(rename = "lastTransitionTime")] pub last_transition_time: String, - /// message is a human readable message indicating details about the transition. This may be an empty string. + /// message is a human readable message indicating details about the transition. + /// This may be an empty string. pub message: String, - /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + /// observedGeneration represents the .metadata.generation that the condition was set based upon. + /// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + /// with respect to the current state of the instance. 
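The observedGeneration rule spelled out just above translates into a short staleness check; a sketch, assuming the field declared just below is Option<i64>, matching .metadata.generation.

fn condition_is_stale(condition: &LocalQueueStatusConditions, current_generation: i64) -> bool {
    match condition.observed_generation {
        Some(observed) => observed < current_generation,
        // observedGeneration is optional; a missing value is not provably stale.
        None => false,
    }
}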
#[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] pub observed_generation: Option, - /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + /// reason contains a programmatic identifier indicating the reason for the condition's last transition. + /// Producers of specific condition types may define expected values and meanings for this field, + /// and whether the values are considered a guaranteed API. + /// The value should be a CamelCase string. + /// This field may not be empty. pub reason: String, /// status of the condition, one of True, False, Unknown. pub status: LocalQueueStatusConditionsStatus, - /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + /// type of condition in CamelCase or in foo.example.com/CamelCase. + /// --- + /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + /// useful (see .node.status.conditions), the ability to deconflict is important. + /// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) #[serde(rename = "type")] pub r#type: String, } -/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, -/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -/// // other fields } +/// Condition contains details for one aspect of the current state of this API Resource. +/// --- +/// This struct is intended for direct use as an array at the field path .status.conditions. For example, +/// +/// +/// type FooStatus struct{ +/// // Represents the observations of a foo's current state. 
+/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +/// // +patchMergeKey=type +/// // +patchStrategy=merge +/// // +listType=map +/// // +listMapKey=type +/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +/// +/// +/// // other fields +/// } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum LocalQueueStatusConditionsStatus { True, diff --git a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/resourceflavors.rs b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/resourceflavors.rs index b36716fe4..596d0df0b 100644 --- a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/resourceflavors.rs +++ b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/resourceflavors.rs @@ -11,30 +11,57 @@ use std::collections::BTreeMap; #[kube(group = "kueue.x-k8s.io", version = "v1beta1", kind = "ResourceFlavor", plural = "resourceflavors")] #[kube(schema = "disabled")] pub struct ResourceFlavorSpec { - /// nodeLabels are labels that associate the ResourceFlavor with Nodes that have the same labels. When a Workload is admitted, its podsets can only get assigned ResourceFlavors whose nodeLabels match the nodeSelector and nodeAffinity fields. Once a ResourceFlavor is assigned to a podSet, the ResourceFlavor's nodeLabels should be injected into the pods of the Workload by the controller that integrates with the Workload object. - /// nodeLabels can be up to 8 elements. + /// nodeLabels are labels that associate the ResourceFlavor with Nodes that + /// have the same labels. + /// When a Workload is admitted, its podsets can only get assigned + /// ResourceFlavors whose nodeLabels match the nodeSelector and nodeAffinity + /// fields. + /// Once a ResourceFlavor is assigned to a podSet, the ResourceFlavor's + /// nodeLabels should be injected into the pods of the Workload by the + /// controller that integrates with the Workload object. + /// + /// + /// nodeLabels can be up to 8 elements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeLabels")] pub node_labels: Option>, - /// nodeTaints are taints that the nodes associated with this ResourceFlavor have. Workloads' podsets must have tolerations for these nodeTaints in order to get assigned this ResourceFlavor during admission. - /// An example of a nodeTaint is cloud.provider.com/preemptible="true":NoSchedule - /// nodeTaints can be up to 8 elements. + /// nodeTaints are taints that the nodes associated with this ResourceFlavor + /// have. + /// Workloads' podsets must have tolerations for these nodeTaints in order to + /// get assigned this ResourceFlavor during admission. + /// + /// + /// An example of a nodeTaint is + /// cloud.provider.com/preemptible="true":NoSchedule + /// + /// + /// nodeTaints can be up to 8 elements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeTaints")] pub node_taints: Option>, - /// tolerations are extra tolerations that will be added to the pods admitted in the quota associated with this resource flavor. - /// An example of a toleration is cloud.provider.com/preemptible="true":NoSchedule - /// tolerations can be up to 8 elements. + /// tolerations are extra tolerations that will be added to the pods admitted in + /// the quota associated with this resource flavor. + /// + /// + /// An example of a toleration is + /// cloud.provider.com/preemptible="true":NoSchedule + /// + /// + /// tolerations can be up to 8 elements. 
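To make the nodeLabels/nodeTaints/tolerations interplay concrete, a hedged sketch of a ResourceFlavorSpec for preemptible nodes, reusing the cloud.provider.com/preemptible="true":NoSchedule taint quoted above. It assumes nodeLabels maps to Option<BTreeMap<String, String>>, that the element types are the ResourceFlavorNodeTaints and ResourceFlavorTolerations structs defined just below, and that the spec has no fields beyond the three shown in this hunk; the label key is reused from the taint purely for illustration.

use std::collections::BTreeMap;

fn preemptible_flavor_spec() -> ResourceFlavorSpec {
    ResourceFlavorSpec {
        node_labels: Some(BTreeMap::from([(
            "cloud.provider.com/preemptible".to_string(),
            "true".to_string(),
        )])),
        node_taints: Some(vec![ResourceFlavorNodeTaints {
            key: "cloud.provider.com/preemptible".to_string(),
            value: Some("true".to_string()),
            effect: "NoSchedule".to_string(),
            time_added: None, // only written by the system for NoExecute taints
        }]),
        tolerations: Some(vec![ResourceFlavorTolerations {
            key: Some("cloud.provider.com/preemptible".to_string()),
            operator: Some("Equal".to_string()),
            value: Some("true".to_string()),
            effect: Some("NoSchedule".to_string()),
            toleration_seconds: None,
        }]),
    }
}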
#[serde(default, skip_serializing_if = "Option::is_none")] pub tolerations: Option>, } -/// The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. +/// The node this Taint is attached to has the "effect" on +/// any pod that does not tolerate the Taint. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ResourceFlavorNodeTaints { - /// Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + /// Required. The effect of the taint on pods + /// that do not tolerate the taint. + /// Valid effects are NoSchedule, PreferNoSchedule and NoExecute. pub effect: String, /// Required. The taint key to be applied to a node. pub key: String, - /// TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + /// TimeAdded represents the time at which the taint was added. + /// It is only written for NoExecute taints. #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeAdded")] pub time_added: Option, /// The taint value corresponding to the taint key. @@ -42,22 +69,32 @@ pub struct ResourceFlavorNodeTaints { pub value: Option, } -/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +/// The pod this Toleration is attached to tolerates any taint that matches +/// the triple using the matching operator . #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ResourceFlavorTolerations { - /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + /// Effect indicates the taint effect to match. Empty means match all taint effects. + /// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. #[serde(default, skip_serializing_if = "Option::is_none")] pub effect: Option, - /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. + /// If the key is empty, operator must be Exists; this combination means to match all values and all keys. #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option, - /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + /// Operator represents a key's relationship to the value. + /// Valid operators are Exists and Equal. Defaults to Equal. + /// Exists is equivalent to wildcard for value, so that a pod can + /// tolerate all taints of a particular category. #[serde(default, skip_serializing_if = "Option::is_none")] pub operator: Option, - /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + /// TolerationSeconds represents the period of time the toleration (which must be + /// of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + /// it is not set, which means tolerate the taint forever (do not evict). Zero and + /// negative values will be treated as 0 (evict immediately) by the system. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] pub toleration_seconds: Option, - /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + /// Value is the taint value the toleration matches to. + /// If the operator is Exists, the value should be empty, otherwise just a regular string. #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, } diff --git a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/workloads.rs b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/workloads.rs index 94ddfea3b..f6f570217 100644 --- a/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/workloads.rs +++ b/kube-custom-resources-rs/src/kueue_x_k8s_io/v1beta1/workloads.rs @@ -14,24 +14,46 @@ use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString; #[kube(status = "WorkloadStatus")] #[kube(schema = "disabled")] pub struct WorkloadSpec { - /// Active determines if a workload can be admitted into a queue. Changing active from true to false will evict any running workloads. Possible values are: - /// - false: indicates that a workload should never be admitted and evicts running workloads - true: indicates that a workload can be evaluated for admission into it's respective queue. - /// Defaults to true + /// Active determines if a workload can be admitted into a queue. + /// Changing active from true to false will evict any running workloads. + /// Possible values are: + /// + /// + /// - false: indicates that a workload should never be admitted and evicts running workloads + /// - true: indicates that a workload can be evaluated for admission into its respective queue. + /// + /// + /// Defaults to true #[serde(default, skip_serializing_if = "Option::is_none")] pub active: Option, - /// podSets is a list of sets of homogeneous pods, each described by a Pod spec and a count. There must be at least one element and at most 8. podSets cannot be changed. + /// podSets is a list of sets of homogeneous pods, each described by a Pod spec + /// and a count. + /// There must be at least one element and at most 8. + /// podSets cannot be changed. #[serde(rename = "podSets")] pub pod_sets: Vec, - /// Priority determines the order of access to the resources managed by the ClusterQueue where the workload is queued. The priority value is populated from PriorityClassName. The higher the value, the higher the priority. If priorityClassName is specified, priority must not be null. + /// Priority determines the order of access to the resources managed by the + /// ClusterQueue where the workload is queued. + /// The priority value is populated from PriorityClassName. + /// The higher the value, the higher the priority. + /// If priorityClassName is specified, priority must not be null. #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option, - /// If specified, indicates the workload's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the workload priority will be default or zero if there is no default. + /// If specified, indicates the workload's priority.
+ /// "system-node-critical" and "system-cluster-critical" are two special + /// keywords which indicate the highest priorities with the former being + /// the highest priority. Any other name must be defined by creating a + /// PriorityClass object with that name. If not specified, the workload + /// priority will be default or zero if there is no default. #[serde(default, skip_serializing_if = "Option::is_none", rename = "priorityClassName")] pub priority_class_name: Option, - /// priorityClassSource determines whether the priorityClass field refers to a pod PriorityClass or kueue.x-k8s.io/workloadpriorityclass. Workload's PriorityClass can accept the name of a pod priorityClass or a workloadPriorityClass. When using pod PriorityClass, a priorityClassSource field has the scheduling.k8s.io/priorityclass value. + /// priorityClassSource determines whether the priorityClass field refers to a pod PriorityClass or kueue.x-k8s.io/workloadpriorityclass. + /// Workload's PriorityClass can accept the name of a pod priorityClass or a workloadPriorityClass. + /// When using pod PriorityClass, a priorityClassSource field has the scheduling.k8s.io/priorityclass value. #[serde(default, skip_serializing_if = "Option::is_none", rename = "priorityClassSource")] pub priority_class_source: Option, - /// queueName is the name of the LocalQueue the Workload is associated with. queueName cannot be changed while .status.admission is not null. + /// queueName is the name of the LocalQueue the Workload is associated with. + /// queueName cannot be changed while .status.admission is not null. #[serde(default, skip_serializing_if = "Option::is_none", rename = "queueName")] pub queue_name: Option, } @@ -40,36 +62,71 @@ pub struct WorkloadSpec { pub struct WorkloadPodSets { /// count is the number of pods for the spec. pub count: i32, - /// minCount is the minimum number of pods for the spec acceptable if the workload supports partial admission. - /// If not provided, partial admission for the current PodSet is not enabled. - /// Only one podSet within the workload can use this. - /// This is an alpha field and requires enabling PartialAdmission feature gate. + /// minCount is the minimum number of pods for the spec acceptable + /// if the workload supports partial admission. + /// + /// + /// If not provided, partial admission for the current PodSet is not + /// enabled. + /// + /// + /// Only one podSet within the workload can use this. + /// + /// + /// This is an alpha field and requires enabling PartialAdmission feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "minCount")] pub min_count: Option, /// name is the PodSet name. pub name: String, - /// template is the Pod template. - /// The only allowed fields in template.metadata are labels and annotations. - /// If requests are omitted for a container or initContainer, they default to the limits if they are explicitly specified for the container or initContainer. - /// During admission, the rules in nodeSelector and nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match the keys in the nodeLabels from the ResourceFlavors considered for this Workload are used to filter the ResourceFlavors that can be assigned to this podSet. + /// template is the Pod template. + /// + /// + /// The only allowed fields in template.metadata are labels and annotations. 
+ /// + /// + /// If requests are omitted for a container or initContainer, + /// they default to the limits if they are explicitly specified for the + /// container or initContainer. + /// + /// + /// During admission, the rules in nodeSelector and + /// nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match + /// the keys in the nodeLabels from the ResourceFlavors considered for this + /// Workload are used to filter the ResourceFlavors that can be assigned to + /// this podSet. pub template: WorkloadPodSetsTemplate, } -/// template is the Pod template. -/// The only allowed fields in template.metadata are labels and annotations. -/// If requests are omitted for a container or initContainer, they default to the limits if they are explicitly specified for the container or initContainer. -/// During admission, the rules in nodeSelector and nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match the keys in the nodeLabels from the ResourceFlavors considered for this Workload are used to filter the ResourceFlavors that can be assigned to this podSet. +/// template is the Pod template. +/// +/// +/// The only allowed fields in template.metadata are labels and annotations. +/// +/// +/// If requests are omitted for a container or initContainer, +/// they default to the limits if they are explicitly specified for the +/// container or initContainer. +/// +/// +/// During admission, the rules in nodeSelector and +/// nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution that match +/// the keys in the nodeLabels from the ResourceFlavors considered for this +/// Workload are used to filter the ResourceFlavors that can be assigned to +/// this podSet. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplate { - /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + /// Standard object's metadata. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[serde(default, skip_serializing_if = "Option::is_none")] pub metadata: Option, - /// Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + /// Specification of the desired behavior of the pod. + /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[serde(default, skip_serializing_if = "Option::is_none")] pub spec: Option, } -/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata +/// Standard object's metadata. +/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -84,10 +141,13 @@ pub struct WorkloadPodSetsTemplateMetadata { pub namespace: Option, } -/// Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +/// Specification of the desired behavior of the pod. 
+/// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpec { - /// Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer. + /// Optional duration in seconds the pod may be active on the node relative to + /// StartTime before the system will actively try to mark it failed and kill associated containers. + /// Value must be a positive integer. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activeDeadlineSeconds")] pub active_deadline_seconds: Option, /// If specified, the pod's scheduling constraints @@ -96,117 +156,255 @@ pub struct WorkloadPodSetsTemplateSpec { /// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. #[serde(default, skip_serializing_if = "Option::is_none", rename = "automountServiceAccountToken")] pub automount_service_account_token: Option, - /// List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. + /// List of containers belonging to the pod. + /// Containers cannot currently be added or removed. + /// There must be at least one container in a Pod. + /// Cannot be updated. pub containers: Vec, - /// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. + /// Specifies the DNS parameters of a pod. + /// Parameters specified here will be merged to the generated DNS + /// configuration based on DNSPolicy. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsConfig")] pub dns_config: Option, - /// Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + /// Set DNS policy for the pod. + /// Defaults to "ClusterFirst". + /// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + /// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + /// To have DNS options set along with hostNetwork, you have to specify DNS policy + /// explicitly to 'ClusterFirstWithHostNet'. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dnsPolicy")] pub dns_policy: Option, - /// EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. + /// EnableServiceLinks indicates whether information about services should be injected into pod's + /// environment variables, matching the syntax of Docker links. + /// Optional: Defaults to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableServiceLinks")] pub enable_service_links: Option, - /// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. 
In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + /// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + /// pod to perform user-initiated actions such as debugging. This list cannot be specified when + /// creating a pod, and it cannot be modified by updating the pod spec. In order to add an + /// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ephemeralContainers")] pub ephemeral_containers: Option>, - /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods. + /// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + /// file if specified. This is only valid for non-hostNetwork pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostAliases")] pub host_aliases: Option>, - /// Use the host's ipc namespace. Optional: Default to false. + /// Use the host's ipc namespace. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIPC")] pub host_ipc: Option, - /// Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false. + /// Host networking requested for this pod. Use the host's network namespace. + /// If this option is set, the ports that will be used must be specified. + /// Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostNetwork")] pub host_network: Option, - /// Use the host's pid namespace. Optional: Default to false. + /// Use the host's pid namespace. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPID")] pub host_pid: Option, - /// Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + /// Use the host's user namespace. + /// Optional: Default to true. + /// If set to true or not present, the pod will be run in the host user namespace, useful + /// for when the pod needs a feature only available to the host user namespace, such as + /// loading a kernel module with CAP_SYS_MODULE. + /// When set to false, a new userns is created for the pod. Setting false is useful for + /// mitigating container breakout vulnerabilities even allowing users to run their + /// containers as root without actually having root privileges on the host. + /// This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostUsers")] pub host_users: Option, - /// Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value. 
+ /// Specifies the hostname of the Pod + /// If not specified, the pod's hostname will be set to a system-defined value. #[serde(default, skip_serializing_if = "Option::is_none")] pub hostname: Option, - /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + /// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + /// If specified, these secrets will be passed to individual puller implementations for them to use. + /// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullSecrets")] pub image_pull_secrets: Option>, - /// List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + /// List of initialization containers belonging to the pod. + /// Init containers are executed in order prior to containers being started. If any + /// init container fails, the pod is considered to have failed and is handled according + /// to its restartPolicy. The name for an init container or normal container must be + /// unique among all containers. + /// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + /// The resourceRequirements of an init container are taken into account during scheduling + /// by finding the highest request/limit for each resource type, and then using the max of + /// of that value or the sum of the normal containers. Limits are applied to init containers + /// in a similar fashion. + /// Init containers cannot currently be added or removed. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "initContainers")] pub init_containers: Option>, - /// NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. + /// NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + /// the scheduler simply schedules this pod onto that node, assuming that it fits resource + /// requirements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeName")] pub node_name: Option, - /// NodeSelector is a selector which must be true for the pod to fit on a node. 
Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + /// NodeSelector is a selector which must be true for the pod to fit on a node. + /// Selector which must match a node's labels for the pod to be scheduled on that node. + /// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeSelector")] pub node_selector: Option>, - /// Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. - /// If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions - /// If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup + /// Specifies the OS of the containers in the pod. + /// Some pod and container fields are restricted if this is set. + /// + /// + /// If the OS field is set to linux, the following fields must be unset: + /// -securityContext.windowsOptions + /// + /// + /// If the OS field is set to windows, following fields must be unset: + /// - spec.hostPID + /// - spec.hostIPC + /// - spec.hostUsers + /// - spec.securityContext.seLinuxOptions + /// - spec.securityContext.seccompProfile + /// - spec.securityContext.fsGroup + /// - spec.securityContext.fsGroupChangePolicy + /// - spec.securityContext.sysctls + /// - spec.shareProcessNamespace + /// - spec.securityContext.runAsUser + /// - spec.securityContext.runAsGroup + /// - spec.securityContext.supplementalGroups + /// - spec.containers[*].securityContext.seLinuxOptions + /// - spec.containers[*].securityContext.seccompProfile + /// - spec.containers[*].securityContext.capabilities + /// - spec.containers[*].securityContext.readOnlyRootFilesystem + /// - spec.containers[*].securityContext.privileged + /// - spec.containers[*].securityContext.allowPrivilegeEscalation + /// - spec.containers[*].securityContext.procMount + /// - spec.containers[*].securityContext.runAsUser + /// - spec.containers[*].securityContext.runAsGroup #[serde(default, skip_serializing_if = "Option::is_none")] pub os: Option, - /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + /// This field will be autopopulated at admission time by the RuntimeClass admission controller. If + /// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + /// The RuntimeClass admission controller will reject Pod create requests which have the overhead already + /// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + /// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + /// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub overhead: Option>, - /// PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. + /// PreemptionPolicy is the Policy for preempting pods with lower priority. + /// One of Never, PreemptLowerPriority. + /// Defaults to PreemptLowerPriority if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preemptionPolicy")] pub preemption_policy: Option, - /// The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. + /// The priority value. Various system components use this field to find the + /// priority of the pod. When Priority Admission Controller is enabled, it + /// prevents users from setting this field. The admission controller populates + /// this field from PriorityClassName. + /// The higher the value, the higher the priority. #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option, - /// If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. + /// If specified, indicates the pod's priority. "system-node-critical" and + /// "system-cluster-critical" are two special keywords which indicate the + /// highest priorities with the former being the highest priority. Any other + /// name must be defined by creating a PriorityClass object with that name. + /// If not specified, the pod priority will be default or zero if there is no + /// default. #[serde(default, skip_serializing_if = "Option::is_none", rename = "priorityClassName")] pub priority_class_name: Option, - /// If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + /// If specified, all readiness gates will be evaluated for pod readiness. 
+ /// A pod is ready when all its containers are ready AND + /// all conditions specified in the readiness gates have status equal to "True" + /// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessGates")] pub readiness_gates: Option>, - /// ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. + /// ResourceClaims defines which ResourceClaims must be allocated + /// and reserved before the Pod is allowed to start. The resources + /// will be made available to those containers which consume them + /// by name. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaims")] pub resource_claims: Option>, - /// Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + /// Restart policy for all containers within the pod. + /// One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + /// Default to Always. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + /// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + /// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + /// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + /// empty definition that uses the default runtime handler. + /// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class #[serde(default, skip_serializing_if = "Option::is_none", rename = "runtimeClassName")] pub runtime_class_name: Option, - /// If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler. + /// If specified, the pod will be dispatched by specified scheduler. + /// If not specified, the pod will be dispatched by default scheduler. #[serde(default, skip_serializing_if = "Option::is_none", rename = "schedulerName")] pub scheduler_name: Option, - /// SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. - /// SchedulingGates can only be set at pod creation time, and be removed only afterwards. 
- /// This is a beta feature enabled by the PodSchedulingReadiness feature gate. + /// SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + /// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + /// scheduler will not attempt to schedule the pod. + /// + /// + /// SchedulingGates can only be set at pod creation time, and be removed only afterwards. + /// + /// + /// This is a beta feature enabled by the PodSchedulingReadiness feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "schedulingGates")] pub scheduling_gates: Option>, - /// SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. + /// SecurityContext holds pod-level security attributes and common container settings. + /// Optional: Defaults to empty. See type description for default values of each field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, - /// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. + /// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + /// Deprecated: Use serviceAccountName instead. #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccount")] pub service_account: Option, - /// ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + /// ServiceAccountName is the name of the ServiceAccount to use to run this pod. + /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountName")] pub service_account_name: Option, - /// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false. + /// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + /// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + /// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + /// If a pod does not have FQDN, this has no effect. + /// Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "setHostnameAsFQDN")] pub set_hostname_as_fqdn: Option, - /// Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. + /// Share a single process namespace between all of the containers in a pod. 
+ /// When this is set containers will be able to view and signal processes from other containers + /// in the same pod, and the first process in each container will not be assigned PID 1. + /// HostPID and ShareProcessNamespace cannot both be set. + /// Optional: Default to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "shareProcessNamespace")] pub share_process_namespace: Option<bool>, - /// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domainname at all. + /// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + /// If not specified, the pod will not have a domainname at all. #[serde(default, skip_serializing_if = "Option::is_none")] pub subdomain: Option<String>, - /// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. + /// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// If this value is nil, the default grace period will be used instead. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// Defaults to 30 seconds. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option<i64>, /// If specified, the pod's tolerations. #[serde(default, skip_serializing_if = "Option::is_none")] pub tolerations: Option>, - /// TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. + /// TopologySpreadConstraints describes how a group of pods ought to spread across topology + /// domains. Scheduler will schedule pods in a way which abides by the constraints. + /// All topologySpreadConstraints are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "topologySpreadConstraints")] pub topology_spread_constraints: Option>, - /// List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes + /// List of volumes that can be mounted by containers belonging to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes #[serde(default, skip_serializing_if = "Option::is_none")] pub volumes: Option>, } @@ -228,15 +426,28 @@ pub struct WorkloadPodSetsTemplateSpecAffinity { /// Describes node affinity scheduling rules for the pod.
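The optional fields above all carry the same `#[serde(default, skip_serializing_if = "Option::is_none", rename = "...")]` attribute set, which is what keeps the generated Rust structs round-trippable against the CRD's camelCase JSON. A minimal standalone sketch of that behaviour follows; the struct and field names are illustrative only (they are not the generated types), and serde plus serde_json are assumed as dependencies.

use serde::{Deserialize, Serialize};

// Illustrative stand-in for one of the generated pod-spec structs; the real
// types live in the generated crate and carry many more fields.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
struct MiniPodSpec {
    // Omitted from the JSON when None, emitted as "runtimeClassName" otherwise.
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "runtimeClassName")]
    runtime_class_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")]
    termination_grace_period_seconds: Option<i64>,
}

fn main() -> serde_json::Result<()> {
    let spec = MiniPodSpec {
        runtime_class_name: None,
        termination_grace_period_seconds: Some(30),
    };
    // Prints {"terminationGracePeriodSeconds":30}; the None field is skipped.
    println!("{}", serde_json::to_string(&spec)?);
    // Deserializing an empty object yields None for both fields.
    let empty: MiniPodSpec = serde_json::from_str("{}")?;
    assert_eq!(
        empty,
        MiniPodSpec { runtime_class_name: None, termination_grace_period_seconds: None }
    );
    Ok(())
}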
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node matches the corresponding matchExpressions; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + /// If the affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to an update), the system + /// may or may not try to eventually evict the pod from its node. #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option, } -/// An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +/// An empty preferred scheduling term matches all objects with implicit weight 0 +/// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution { /// A node selector term, associated with the corresponding weight. @@ -256,31 +467,47 @@ pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityPreferredDuringSchedul pub match_fields: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. 
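For orientation, here is a small standalone sketch of what one node selector requirement looks like on the wire, following the semantics in the comments here (terms are ORed by the scheduler, requirements within a term are ANDed, and In/NotIn need a non-empty values list while Exists/DoesNotExist need an empty one). The types are local illustrative stand-ins for the generated MatchExpressions and NodeSelectorTerms structs, which the diff only shows in part; serde and serde_json are assumed dependencies.

use serde::Serialize;

// Local stand-ins mirroring the shape of the generated node-affinity types:
// a term holds a list of requirements (ANDed); the scheduler ORs the terms.
#[derive(Serialize)]
struct NodeSelectorRequirement {
    key: String,
    // Valid operators per the API docs: In, NotIn, Exists, DoesNotExist, Gt, Lt.
    operator: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    values: Option<Vec<String>>,
}

#[derive(Serialize)]
struct NodeSelectorTerm {
    #[serde(rename = "matchExpressions", skip_serializing_if = "Option::is_none")]
    match_expressions: Option<Vec<NodeSelectorRequirement>>,
}

fn main() {
    // "Only consider amd64 nodes that also carry a `gpu` label."
    let term = NodeSelectorTerm {
        match_expressions: Some(vec![
            NodeSelectorRequirement {
                key: "kubernetes.io/arch".into(),
                operator: "In".into(),
                // In/NotIn require a non-empty values list.
                values: Some(vec!["amd64".into()]),
            },
            NodeSelectorRequirement {
                key: "gpu".into(),
                operator: "Exists".into(),
                // Exists/DoesNotExist require the values list to be empty.
                values: None,
            },
        ]),
    };
    println!("{}", serde_json::to_string_pretty(&term).unwrap());
}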
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. +/// If the affinity requirements specified by this field are not met at +/// scheduling time, the pod will not be scheduled onto the node. +/// If the affinity requirements specified by this field cease to be met +/// at some point during pod execution (e.g. 
due to an update), the system +/// may or may not try to eventually evict the pod from its node. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution { /// Required. A list of node selector terms. The terms are ORed. @@ -288,7 +515,9 @@ pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringScheduli pub node_selector_terms: Vec, } -/// A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. +/// A null or empty node selector term matches no objects. The requirements of +/// them are ANDed. +/// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms { /// A list of node selector requirements by node's labels. @@ -299,26 +528,38 @@ pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringScheduli pub match_fields: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A node selector requirement is a selector that contains values, a key, and an operator +/// that relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields { /// The label key that the selector applies to. pub key: String, - /// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + /// Represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
pub operator: String, - /// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + /// An array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. If the operator is Gt or Lt, the values + /// array must have a single element, which will be interpreted as an integer. + /// This array is replaced during a strategic merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -326,10 +567,24 @@ pub struct WorkloadPodSetsTemplateSpecAffinityNodeAffinityRequiredDuringScheduli /// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + /// If the affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to a pod label update), the + /// system may or may not try to eventually evict the pod from its node. 
+ /// When there are multiple elements, the lists of nodes corresponding to each + /// podAffinityTerm are intersected, i.e. all terms must be satisfied. #[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option>, } @@ -340,144 +595,244 @@ pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringScheduli /// Required. A pod affinity term, associated with the corresponding weight. #[serde(rename = "podAffinityTerm")] pub pod_affinity_term: WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, - /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + /// weight associated with matching the corresponding podAffinityTerm, + /// in the range 1-100. pub weight: i32, } /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. + /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. 
+/// If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +/// Defines a set of pods (namely those matching the labelSelector +/// relative to the given namespace(s)) that this pod should be +/// co-located (affinity) or not co-located (anti-affinity) with, +/// where co-located is defined as running on a node whose value of +/// the label with key matches that of any node on which +/// a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. 
+ /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. 
pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -485,10 +840,24 @@ pub struct WorkloadPodSetsTemplateSpecAffinityPodAffinityRequiredDuringSchedulin /// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinity { - /// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + /// The scheduler will prefer to schedule pods to nodes that satisfy + /// the anti-affinity expressions specified by this field, but it may choose + /// a node that violates one or more of the expressions. The node that is + /// most preferred is the one with the greatest sum of weights, i.e. + /// for each node that meets all of the scheduling requirements (resource + /// request, requiredDuringScheduling anti-affinity expressions, etc.), + /// compute a sum by iterating through the elements of this field and adding + /// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + /// node(s) with the highest sum are the most preferred. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredDuringSchedulingIgnoredDuringExecution")] pub preferred_during_scheduling_ignored_during_execution: Option>, - /// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + /// If the anti-affinity requirements specified by this field are not met at + /// scheduling time, the pod will not be scheduled onto the node. + /// If the anti-affinity requirements specified by this field cease to be met + /// at some point during pod execution (e.g. due to a pod label update), the + /// system may or may not try to eventually evict the pod from its node. + /// When there are multiple elements, the lists of nodes corresponding to each + /// podAffinityTerm are intersected, i.e. all terms must be satisfied. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "requiredDuringSchedulingIgnoredDuringExecution")] pub required_during_scheduling_ignored_during_execution: Option>, } @@ -499,144 +868,244 @@ pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSche /// Required. A pod affinity term, associated with the corresponding weight. #[serde(rename = "podAffinityTerm")] pub pod_affinity_term: WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm, - /// weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + /// weight associated with matching the corresponding podAffinityTerm, + /// in the range 1-100. pub weight: i32, } /// Required. A pod affinity term, associated with the corresponding weight. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. + /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. 
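The same label selector shape recurs throughout these (anti-)affinity terms, so a short standalone sketch of the matchLabels shorthand described in the comments may help: a single {key,value} entry in matchLabels selects the same pods as one matchExpressions entry with operator In and a single value. The types below are local illustrations rather than the generated LabelSelector structs, and serde plus serde_json are assumed dependencies.

use serde::Serialize;
use std::collections::BTreeMap;

// Local stand-in for the label selector used by the pod (anti-)affinity terms.
#[derive(Serialize)]
struct LabelSelector {
    #[serde(rename = "matchLabels", skip_serializing_if = "Option::is_none")]
    match_labels: Option<BTreeMap<String, String>>,
    #[serde(rename = "matchExpressions", skip_serializing_if = "Option::is_none")]
    match_expressions: Option<Vec<LabelSelectorRequirement>>,
}

#[derive(Serialize)]
struct LabelSelectorRequirement {
    key: String,
    operator: String, // In, NotIn, Exists, DoesNotExist
    #[serde(skip_serializing_if = "Option::is_none")]
    values: Option<Vec<String>>,
}

fn main() {
    // matchLabels {"app": "web"} is shorthand for one matchExpressions entry
    // with operator In; both selectors match the same set of pods.
    let shorthand = LabelSelector {
        match_labels: Some(BTreeMap::from([("app".to_string(), "web".to_string())])),
        match_expressions: None,
    };
    let expanded = LabelSelector {
        match_labels: None,
        match_expressions: Some(vec![LabelSelectorRequirement {
            key: "app".into(),
            operator: "In".into(),
            values: Some(vec!["web".into()]),
        }]),
    };
    println!("{}", serde_json::to_string_pretty(&shorthand).unwrap());
    println!("{}", serde_json::to_string_pretty(&expanded).unwrap());
}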
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running +/// Defines a set of pods (namely those matching the labelSelector +/// relative to the given namespace(s)) that this pod should be +/// co-located (affinity) or not co-located (anti-affinity) with, +/// where co-located is defined as running on a node whose value of +/// the label with key matches that of any node on which +/// a pod of the set of pods is running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution { - /// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. + /// A label query over a set of resources, in this case pods. + /// If it's null, this PodAffinityTerm matches with no Pods. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + /// MismatchLabelKeys is a set of pod label keys to select which pods will + /// be taken into consideration. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + /// to select the group of existing pods which pods will be taken into consideration + /// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + /// pod labels will be ignored. The default value is empty. + /// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + /// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + /// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mismatchLabelKeys")] pub mismatch_label_keys: Option>, - /// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + /// A label query over the set of namespaces that the term applies to. + /// The term is applied to the union of the namespaces selected by this field + /// and the ones listed in the namespaces field. 
+ /// null selector and null or empty namespaces list means "this pod's namespace". + /// An empty selector ({}) matches all namespaces. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + /// namespaces specifies a static list of namespace names that the term applies to. + /// The term is applied to the union of the namespaces listed in this field + /// and the ones selected by namespaceSelector. + /// null or empty namespaces list and null namespaceSelector means "this pod's namespace". #[serde(default, skip_serializing_if = "Option::is_none")] pub namespaces: Option>, - /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + /// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + /// the labelSelector in the specified namespaces, where co-located is defined as running on a node + /// whose value of the label with key topologyKey matches that of any node on which any of the + /// selected pods is running. + /// Empty topologyKey is not allowed. #[serde(rename = "topologyKey")] pub topology_key: String, } -/// A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods. +/// A label query over a set of resources, in this case pods. +/// If it's null, this PodAffinityTerm matches with no Pods. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. 
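// --- illustrative sketch, not part of the generated bindings -----------------
// Per the namespaceSelector docs above: leaving the field unset (None) limits
// the term to the incoming pod's own namespace, while an explicitly *empty*
// selector ({} in YAML, i.e. the struct present but both fields None) matches
// every namespace.
fn _example_all_namespaces_selector() -> WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector {
    WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector {
        match_expressions: None,
        match_labels: None, // serializes as `namespaceSelector: {}` when wrapped in Some(..)
    }
}
// -----------------------------------------------------------------------------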
pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. +/// A label query over the set of namespaces that the term applies to. +/// The term is applied to the union of the namespaces selected by this field +/// and the ones listed in the namespaces field. +/// null selector and null or empty namespaces list means "this pod's namespace". +/// An empty selector ({}) matches all namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionNamespaceSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -644,75 +1113,166 @@ pub struct WorkloadPodSetsTemplateSpecAffinityPodAntiAffinityRequiredDuringSched /// A single application container that you want to run within a pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainers { - /// Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Arguments to the entrypoint. + /// The container image's CMD is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub args: Option>, - /// Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Entrypoint array. Not executed within a shell. + /// The container image's ENTRYPOINT is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. 
+ /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, - /// List of environment variables to set in the container. Cannot be updated. + /// List of environment variables to set in the container. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub env: Option>, - /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + /// List of sources to populate environment variables in the container. + /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys + /// will be reported as an event when the container is starting. When a key exists in multiple + /// sources, the value associated with the last source will take precedence. + /// Values defined by an Env with a duplicate key will take precedence. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, - /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + /// Container image name. + /// More info: https://kubernetes.io/docs/concepts/containers/images + /// This field is optional to allow higher level config management to default or override + /// container images in workload controllers like Deployments and StatefulSets. #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + /// Image pull policy. + /// One of Always, Never, IfNotPresent. + /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")] pub image_pull_policy: Option, - /// Actions that the management system should take in response to container lifecycle events. Cannot be updated. + /// Actions that the management system should take in response to container lifecycle events. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub lifecycle: Option, - /// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container liveness. + /// Container will be restarted if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] pub liveness_probe: Option, - /// Name of the container specified as a DNS_LABEL. 
Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + /// Name of the container specified as a DNS_LABEL. + /// Each container in a pod must have a unique name (DNS_LABEL). + /// Cannot be updated. pub name: String, - /// List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + /// List of ports to expose from the container. Not specifying a port here + /// DOES NOT prevent that port from being exposed. Any port which is + /// listening on the default "0.0.0.0" address inside a container will be + /// accessible from the network. + /// Modifying this array with strategic merge patch may corrupt the data. + /// For more information See https://github.com/kubernetes/kubernetes/issues/108255. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub ports: Option>, - /// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container service readiness. + /// Container will be removed from service endpoints if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessProbe")] pub readiness_probe: Option, /// Resources resize policy for the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")] pub resize_policy: Option>, - /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Compute Resources required by this container. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, - /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + /// RestartPolicy defines the restart behavior of individual containers in a pod. 
+ /// This field may only be set for init containers, and the only allowed value is "Always". + /// For non-init containers or when this field is not specified, + /// the restart behavior is defined by the Pod's restart policy and the container type. + /// Setting the RestartPolicy as "Always" for the init container will have the following effect: + /// this init container will be continually restarted on + /// exit until all regular containers have terminated. Once all regular + /// containers have completed, all init containers with restartPolicy "Always" + /// will be shut down. This lifecycle differs from normal init containers and + /// is often referred to as a "sidecar" container. Although this init + /// container still starts in the init container sequence, it does not wait + /// for the container to complete before proceeding to the next init + /// container. Instead, the next init container starts immediately after this + /// init container is started, or after any startupProbe has successfully + /// completed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + /// SecurityContext defines the security options the container should be run with. + /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, - /// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// StartupProbe indicates that the Pod has successfully initialized. + /// If specified, no other probes are executed until this completes successfully. + /// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + /// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + /// when it might take a long time to load data or warm a cache, than during steady-state operation. + /// This cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] pub startup_probe: Option, - /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + /// Whether this container should allocate a buffer for stdin in the container runtime. If this + /// is not set, reads from stdin in the container will always result in EOF. + /// Default is false. 
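// --- illustrative sketch, not part of the generated bindings -----------------
// A minimal container entry deserialized into the generated type, assuming a
// serde YAML crate such as serde_yaml is available as a (dev-)dependency; all
// omitted optional fields fall back to None via #[serde(default)]. The image,
// names and the QUEUE_NAME variable are placeholders. Note the $$ escape
// described above: "$$(QUEUE_NAME)" is passed through as the literal string
// "$(QUEUE_NAME)", while "$(QUEUE_NAME)" is expanded from the container's
// environment (and left unchanged if the variable cannot be resolved). Per the
// restartPolicy docs, `restartPolicy: Always` is only valid when the entry is
// used as an init (sidecar) container, so it is not set here.
fn _example_container() -> WorkloadPodSetsTemplateSpecContainers {
    serde_yaml::from_str(
        r#"
name: worker
image: registry.example.com/worker:1.0
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args: ["echo queue=$(QUEUE_NAME) literal=$$(QUEUE_NAME)"]
"#,
    )
    .expect("minimal container should deserialize")
}
// -----------------------------------------------------------------------------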
#[serde(default, skip_serializing_if = "Option::is_none")] pub stdin: Option, - /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + /// Whether the container runtime should close the stdin channel after it has been opened by + /// a single attach. When stdin is true the stdin stream will remain open across multiple attach + /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, + /// at which time stdin is closed and remains closed until the container is restarted. If this + /// flag is false, a container processes that reads from stdin will never receive an EOF. + /// Default is false #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")] pub stdin_once: Option, - /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + /// Optional: Path at which the file to which the container's termination message + /// will be written is mounted into the container's filesystem. + /// Message written is intended to be brief final status, such as an assertion failure message. + /// Will be truncated by the node if greater than 4096 bytes. The total message length across + /// all containers will be limited to 12kb. + /// Defaults to /dev/termination-log. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")] pub termination_message_path: Option, - /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + /// Indicate how the termination message should be populated. File will use the contents of + /// terminationMessagePath to populate the container status message on both success and failure. + /// FallbackToLogsOnError will use the last chunk of container log output if the termination + /// message file is empty and the container exited with an error. + /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + /// Defaults to File. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")] pub termination_message_policy: Option, - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false. + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub tty: Option, /// volumeDevices is the list of block devices to be used by the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")] pub volume_devices: Option>, - /// Pod volumes to mount into the container's filesystem. Cannot be updated. + /// Pod volumes to mount into the container's filesystem. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] pub volume_mounts: Option>, - /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + /// Container's working directory. + /// If not specified, the container runtime's default will be used, which + /// might be configured in the container image. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")] pub working_dir: Option, } @@ -722,7 +1282,15 @@ pub struct WorkloadPodSetsTemplateSpecContainers { pub struct WorkloadPodSetsTemplateSpecContainersEnv { /// Name of the environment variable. Must be a C_IDENTIFIER. pub name: String, - /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + /// Variable references $(VAR_NAME) are expanded + /// using the previously defined environment variables in the container and + /// any service environment variables. If a variable cannot be resolved, + /// the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + /// Escaped references will never be expanded, regardless of whether the variable + /// exists or not. + /// Defaults to "". #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Source for the environment variable's value. Cannot be used if value is not empty. @@ -736,10 +1304,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFrom { /// Selects a key of a ConfigMap. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")] pub config_map_key_ref: Option, - /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
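// --- illustrative sketch, not part of the generated bindings -----------------
// Exposes the pod name to the container through the downward-API fieldRef
// described above. The environment variable name is a placeholder, and the
// Rust field names/types are assumed to follow the usual generated layout,
// which this excerpt elides.
fn _example_downward_api_env() -> WorkloadPodSetsTemplateSpecContainersEnv {
    WorkloadPodSetsTemplateSpecContainersEnv {
        name: "POD_NAME".to_string(),
        value: None, // `value` and `valueFrom` are mutually exclusive
        value_from: Some(WorkloadPodSetsTemplateSpecContainersEnvValueFrom {
            config_map_key_ref: None,
            field_ref: Some(WorkloadPodSetsTemplateSpecContainersEnvValueFromFieldRef {
                api_version: None, // defaults to "v1"
                field_path: "metadata.name".to_string(),
            }),
            resource_field_ref: None,
            secret_key_ref: None,
        }),
    }
}
// -----------------------------------------------------------------------------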
#[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, /// Selects a key of a secret in the pod's namespace @@ -752,7 +1322,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFrom { pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromConfigMapKeyRef { /// The key to select. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap or its key must be defined @@ -760,7 +1332,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromConfigMapKeyRef { pub optional: Option, } -/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. +/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, +/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromFieldRef { /// Version of the schema the FieldPath is written in terms of, defaults to "v1". @@ -771,7 +1344,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromFieldRef { pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -789,7 +1363,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromResourceFieldRef { pub struct WorkloadPodSetsTemplateSpecContainersEnvValueFromSecretKeyRef { /// The key of the secret to select from. Must be a valid secret key. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. 
apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret or its key must be defined @@ -814,7 +1390,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvFrom { /// The ConfigMap to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersEnvFromConfigMapRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap must be defined @@ -825,7 +1403,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvFromConfigMapRef { /// The Secret to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersEnvFromSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret must be defined @@ -833,18 +1413,33 @@ pub struct WorkloadPodSetsTemplateSpecContainersEnvFromSecretRef { pub optional: Option, } -/// Actions that the management system should take in response to container lifecycle events. Cannot be updated. +/// Actions that the management system should take in response to container lifecycle events. +/// Cannot be updated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecycle { - /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PostStart is called immediately after a container is created. If the handler fails, + /// the container is terminated and restarted according to its restart policy. + /// Other management of the container blocks until the hook completes. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")] pub post_start: Option, - /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PreStop is called immediately before a container is terminated due to an + /// API request or management event such as liveness/startup probe failure, + /// preemption, resource contention, etc. The handler is not called if the + /// container crashes or exits. The Pod's termination grace period countdown begins before the + /// PreStop hook is executed. Regardless of the outcome of the handler, the + /// container will eventually terminate within the Pod's termination grace + /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes + /// or until the termination grace period is reached. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")] pub pre_stop: Option, } -/// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PostStart is called immediately after a container is created. If the handler fails, +/// the container is terminated and restarted according to its restart policy. +/// Other management of the container blocks until the hook completes. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStart { /// Exec specifies the action to take. @@ -856,7 +1451,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStart { /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -864,7 +1461,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStart { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -872,7 +1473,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartExec { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -881,9 +1483,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -891,7 +1496,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -904,17 +1510,29 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartSleep { pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. 
pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStop { /// Exec specifies the action to take. @@ -926,7 +1544,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStop { /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -934,7 +1554,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStop { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
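// --- illustrative sketch, not part of the generated bindings -----------------
// A preStop hook of the kind documented above, giving the process a short
// drain window before termination proceeds. The command is a placeholder, and
// because lifecycle exec commands are not run in a shell, the shell is invoked
// explicitly.
fn _example_pre_stop_hook() -> WorkloadPodSetsTemplateSpecContainersLifecycle {
    WorkloadPodSetsTemplateSpecContainersLifecycle {
        post_start: None,
        pre_stop: Some(WorkloadPodSetsTemplateSpecContainersLifecyclePreStop {
            exec: Some(WorkloadPodSetsTemplateSpecContainersLifecyclePreStopExec {
                command: Some(vec![
                    "/bin/sh".to_string(),
                    "-c".to_string(),
                    "sleep 5".to_string(),
                ]),
            }),
            http_get: None,
            sleep: None,
            tcp_socket: None, // deprecated as a lifecycle handler, see docs above
        }),
    }
}
// -----------------------------------------------------------------------------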
#[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -942,7 +1566,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopExec { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -951,9 +1576,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -961,7 +1589,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -974,23 +1603,31 @@ pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopSleep { pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLifecyclePreStopTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container liveness. +/// Container will be restarted if the probe fails. 
+/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -999,22 +1636,36 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1022,7 +1673,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1032,8 +1687,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeExec { pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1041,7 +1699,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
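The probe structs in this hunk (and the readiness/startup variants further down, which have the same shape) mirror the core/v1 Probe API. As a purely illustrative sketch that is not part of this patch, the liveness-probe bindings might be filled in as below; it assumes the generics erased in this listing are the usual Option<i32>/Option<i64>/Option<String>, that the "httpHeaders" field is generated as http_headers, and that IntOrString is the k8s-openapi type already imported by this file. The helper name is hypothetical.

// Hypothetical usage sketch; the struct types are the generated bindings shown in this hunk.
use k8s_openapi::apimachinery::pkg::util::intstr::IntOrString;

fn example_liveness_probe() -> WorkloadPodSetsTemplateSpecContainersLivenessProbe {
    WorkloadPodSetsTemplateSpecContainersLivenessProbe {
        // Exactly one handler is normally set; here an HTTP GET against /healthz.
        exec: None,
        grpc: None,
        tcp_socket: None,
        http_get: Some(WorkloadPodSetsTemplateSpecContainersLivenessProbeHttpGet {
            host: None,                          // defaults to the pod IP
            http_headers: None,                  // field name assumed from the "httpHeaders" rename
            path: Some("/healthz".to_string()),
            port: IntOrString::Int(8080),        // or IntOrString::String("http".into()) for a named port
            scheme: Some("HTTP".to_string()),    // defaults to HTTP when unset
        }),
        failure_threshold: Some(3),              // defaults to 3
        initial_delay_seconds: Some(5),
        period_seconds: Some(10),                // defaults to 10 seconds
        success_threshold: Some(1),              // must be 1 for liveness and startup probes
        termination_grace_period_seconds: None,  // falls back to spec.terminationGracePeriodSeconds
        timeout_seconds: Some(1),                // defaults to 1 second
    }
}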
+ /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1050,9 +1709,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1060,7 +1722,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1072,37 +1735,50 @@ pub struct WorkloadPodSetsTemplateSpecContainersLivenessProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerPort represents a network port in a single container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersPorts { - /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + /// Number of port to expose on the pod's IP address. + /// This must be a valid port number, 0 < x < 65536. #[serde(rename = "containerPort")] pub container_port: i32, /// What host IP to bind the external port to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")] pub host_ip: Option, - /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + /// Number of port to expose on the host. + /// If specified, this must be a valid port number, 0 < x < 65536. + /// If HostNetwork is specified, this must match ContainerPort. + /// Most containers do not need this. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")] pub host_port: Option, - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + /// If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each + /// named port in a pod must have a unique name. Name for the port that can be + /// referred to by services. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + /// Protocol for port. Must be UDP, TCP, or SCTP. + /// Defaults to "TCP". #[serde(default, skip_serializing_if = "Option::is_none")] pub protocol: Option, } -/// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container service readiness. +/// Container will be removed from service endpoints if the probe fails. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1111,22 +1787,36 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. 
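For the ContainerPort bindings above, a minimal sketch (illustrative only, not generated code) of a named TCP port; the helper name is hypothetical and the erased generics are assumed to be Option<String>/Option<i32>.

fn example_container_port() -> WorkloadPodSetsTemplateSpecContainersPorts {
    WorkloadPodSetsTemplateSpecContainersPorts {
        container_port: 8080,                 // required, 0 < x < 65536
        host_ip: None,
        host_port: None,                      // rarely needed; must equal container_port with host networking
        name: Some("http".to_string()),       // IANA_SVC_NAME, unique within the pod
        protocol: Some("TCP".to_string()),    // UDP, TCP, or SCTP; defaults to "TCP"
    }
}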
Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1134,7 +1824,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1144,8 +1838,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeExec { pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. 
pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1153,7 +1850,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1162,9 +1860,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1172,7 +1873,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1184,33 +1886,49 @@ pub struct WorkloadPodSetsTemplateSpecContainersReadinessProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerResizePolicy represents resource resize policy for the container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersResizePolicy { - /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + /// Name of the resource to which this resource resize policy applies. + /// Supported values: cpu, memory. 
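The two required fields of the resize-policy struct follow just below; as a quick illustrative sketch (hypothetical helper, not part of the patch):

fn example_resize_policy() -> WorkloadPodSetsTemplateSpecContainersResizePolicy {
    WorkloadPodSetsTemplateSpecContainersResizePolicy {
        resource_name: "cpu".to_string(),          // supported values: "cpu" or "memory"
        restart_policy: "NotRequired".to_string(), // the default when unspecified
    }
}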
#[serde(rename = "resourceName")] pub resource_name: String, - /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + /// Restart policy to apply when specified resource is resized. + /// If not specified, it defaults to NotRequired. #[serde(rename = "restartPolicy")] pub restart_policy: String, } -/// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +/// Compute Resources required by this container. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. + /// Claims lists the names of resources, defined in spec.resourceClaims, + /// that are used by this container. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. It can only be set for containers. #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option>, - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -1218,49 +1936,95 @@ pub struct WorkloadPodSetsTemplateSpecContainersResources { /// ResourceClaim references one entry in PodSpec.ResourceClaims. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + /// Name must match the name of one entry in pod.spec.resourceClaims of + /// the Pod where this field is used. It makes that resource available + /// inside a container. pub name: String, } -/// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +/// SecurityContext defines the security options the container should be run with. +/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. +/// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersSecurityContext { - /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + /// AllowPrivilegeEscalation controls whether a process can gain more + /// privileges than its parent process. This bool directly controls if + /// the no_new_privs flag will be set on the container process. + /// AllowPrivilegeEscalation is true always when the container is: + /// 1) run as Privileged + /// 2) has CAP_SYS_ADMIN + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] pub allow_privilege_escalation: Option, - /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + /// The capabilities to add/drop when running containers. + /// Defaults to the default set of capabilities granted by the container runtime. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub capabilities: Option, - /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + /// Run container in privileged mode. + /// Processes in privileged containers are essentially equivalent to root on the host. + /// Defaults to false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, - /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + /// procMount denotes the type of proc mount to use for the containers. + /// The default is DefaultProcMount which uses the container runtime defaults for + /// readonly paths and masked paths. + /// This requires the ProcMountType feature flag to be enabled. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] pub proc_mount: Option, - /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + /// Whether this container has a read-only root filesystem. + /// Default is false. + /// Note that this field cannot be set when spec.os.name is windows. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] pub read_only_root_filesystem: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to the container. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. 
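Taken together with the se_linux_options, seccomp_profile and windows_options fields that follow, the container security-context bindings can be populated as in this purely illustrative sketch; the helper name is hypothetical, the erased generics are assumed to be Option<bool>/Option<i64>/Option<String>, and the Capabilities `add` field name is assumed from the upstream API since it is not visible in this excerpt.

fn example_security_context() -> WorkloadPodSetsTemplateSpecContainersSecurityContext {
    WorkloadPodSetsTemplateSpecContainersSecurityContext {
        allow_privilege_escalation: Some(false),
        capabilities: Some(WorkloadPodSetsTemplateSpecContainersSecurityContextCapabilities {
            add: None,                                // field name assumed; not shown in this hunk
            drop: Some(vec!["ALL".to_string()]),
        }),
        privileged: Some(false),
        proc_mount: None,                             // DefaultProcMount when unset
        read_only_root_filesystem: Some(true),        // defaults to false
        run_as_group: None,
        run_as_non_root: Some(true),
        run_as_user: Some(1000),
        se_linux_options: None,
        seccomp_profile: Some(WorkloadPodSetsTemplateSpecContainersSecurityContextSeccompProfile {
            localhost_profile: None,                  // only set when r#type is "Localhost"
            r#type: "RuntimeDefault".to_string(),
        }),
        windows_options: None,
    }
}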
#[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by this container. If seccomp options are + /// provided at both the pod & container level, the container options + /// override the pod options. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options from the PodSecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. +/// The capabilities to add/drop when running containers. +/// Defaults to the default set of capabilities granted by the container runtime. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextCapabilities { /// Added capabilities @@ -1271,7 +2035,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextCapabilities { pub drop: Option>, } -/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to the container. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in PodSecurityContext. If set in both SecurityContext and +/// PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -1288,42 +2056,71 @@ pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextSeLinuxOptions { pub user: Option, } -/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by this container. 
If seccomp options are +/// provided at both the pod & container level, the container options +/// override the pod options. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options from the PodSecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). 
In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// StartupProbe indicates that the Pod has successfully initialized. +/// If specified, no other probes are executed until this completes successfully. +/// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. +/// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, +/// when it might take a long time to load data or warm a cache, than during steady-state operation. +/// This cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersStartupProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1332,22 +2129,36 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. 
+ /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1355,7 +2166,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1365,8 +2180,11 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeExec { pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1374,7 +2192,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1383,9 +2202,12 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1393,7 +2215,8 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1405,7 +2228,9 @@ pub struct WorkloadPodSetsTemplateSpecContainersStartupProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -1422,35 +2247,53 @@ pub struct WorkloadPodSetsTemplateSpecContainersVolumeDevices { /// VolumeMount describes a mounting of a Volume within a container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecContainersVolumeMounts { - /// Path within the container at which the volume should be mounted. Must not contain ':'. + /// Path within the container at which the volume should be mounted. Must + /// not contain ':'. #[serde(rename = "mountPath")] pub mount_path: String, - /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + /// mountPropagation determines how mounts are propagated from the host + /// to container and the other way around. + /// When not set, MountPropagationNone is used. + /// This field is beta in 1.10. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] pub mount_propagation: Option, /// This must match the Name of a Volume. pub name: String, - /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + /// Mounted read-only if true, read-write otherwise (false or unspecified). + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + /// Path within the volume from which the container's volume should be mounted. + /// Defaults to "" (volume's root). #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] pub sub_path: Option, - /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + /// Expanded path within the volume from which the container's volume should be mounted. + /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + /// Defaults to "" (volume's root). 
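A minimal, purely illustrative sketch of the volume-mount struct (including the sub_path_expr field shown just below); the helper name is hypothetical and the erased generics are assumed to be Option<String>/Option<bool>.

fn example_volume_mount() -> WorkloadPodSetsTemplateSpecContainersVolumeMounts {
    WorkloadPodSetsTemplateSpecContainersVolumeMounts {
        mount_path: "/data".to_string(),   // must not contain ':'
        mount_propagation: None,           // MountPropagationNone when unset
        name: "cache".to_string(),         // must match a Volume name in the pod template
        read_only: Some(true),             // defaults to false
        sub_path: None,                    // "" (volume root) when unset
        sub_path_expr: None,               // mutually exclusive with sub_path
    }
}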
+ /// SubPathExpr and SubPath are mutually exclusive. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] pub sub_path_expr: Option, } -/// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. +/// Specifies the DNS parameters of a pod. +/// Parameters specified here will be merged to the generated DNS +/// configuration based on DNSPolicy. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecDnsConfig { - /// A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed. + /// A list of DNS name server IP addresses. + /// This will be appended to the base nameservers generated from DNSPolicy. + /// Duplicated nameservers will be removed. #[serde(default, skip_serializing_if = "Option::is_none")] pub nameservers: Option>, - /// A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy. + /// A list of DNS resolver options. + /// This will be merged with the base options generated from DNSPolicy. + /// Duplicated entries will be removed. Resolution options given in Options + /// will override those that appear in the base DNSPolicy. #[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option>, - /// A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed. + /// A list of DNS search domains for host-name lookup. + /// This will be appended to the base search paths generated from DNSPolicy. + /// Duplicated search paths will be removed. #[serde(default, skip_serializing_if = "Option::is_none")] pub searches: Option>, } @@ -1465,26 +2308,58 @@ pub struct WorkloadPodSetsTemplateSpecDnsConfigOptions { pub value: Option, } -/// An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. -/// To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. +/// An EphemeralContainer is a temporary container that you may add to an existing Pod for +/// user-initiated activities such as debugging. Ephemeral containers have no resource or +/// scheduling guarantees, and they will not be restarted when they exit or when a Pod is +/// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the +/// Pod to exceed its resource allocation. +/// +/// +/// To add an ephemeral container, use the ephemeralcontainers subresource of an existing +/// Pod. Ephemeral containers may not be removed or restarted. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainers { - /// Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
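And for the DNS-config struct in the hunk above, an illustrative sketch (hypothetical helper; the erased generics are assumed to be Option<Vec<String>> and Option<Vec<WorkloadPodSetsTemplateSpecDnsConfigOptions>>). The options list is left unset here because its element type is only partially visible in this excerpt.

fn example_dns_config() -> WorkloadPodSetsTemplateSpecDnsConfig {
    WorkloadPodSetsTemplateSpecDnsConfig {
        nameservers: Some(vec!["10.96.0.10".to_string()]),      // appended to the DNSPolicy-generated list
        options: None,                                          // resolver options (e.g. ndots) would go here
        searches: Some(vec!["svc.cluster.local".to_string()]),  // appended to the base search paths
    }
}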
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Arguments to the entrypoint. + /// The image's CMD is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub args: Option>, - /// Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Entrypoint array. Not executed within a shell. + /// The image's ENTRYPOINT is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, - /// List of environment variables to set in the container. Cannot be updated. + /// List of environment variables to set in the container. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub env: Option>, - /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + /// List of sources to populate environment variables in the container. + /// The keys defined within a source must be a C_IDENTIFIER. 
All invalid keys + /// will be reported as an event when the container is starting. When a key exists in multiple + /// sources, the value associated with the last source will take precedence. + /// Values defined by an Env with a duplicate key will take precedence. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, - /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + /// Container image name. + /// More info: https://kubernetes.io/docs/concepts/containers/images #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + /// Image pull policy. + /// One of Always, Never, IfNotPresent. + /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")] pub image_pull_policy: Option, /// Lifecycle is not allowed for ephemeral containers. @@ -1493,7 +2368,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainers { /// Probes are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] pub liveness_probe: Option, - /// Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. + /// Name of the ephemeral container specified as a DNS_LABEL. + /// This name must be unique among all containers, init containers and ephemeral containers. pub name: String, /// Ports are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1504,44 +2380,79 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainers { /// Resources resize policy for the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")] pub resize_policy: Option>, - /// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. + /// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + /// already allocated to the pod. #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, - /// Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers. + /// Restart policy for the container to manage the restart behavior of each + /// container within a pod. + /// This may only be set for init containers. You cannot set this field on + /// ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + /// Optional: SecurityContext defines the security options the ephemeral container should be run with. + /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, /// Probes are not allowed for ephemeral containers. #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] pub startup_probe: Option, - /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + /// Whether this container should allocate a buffer for stdin in the container runtime. If this + /// is not set, reads from stdin in the container will always result in EOF. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub stdin: Option, - /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + /// Whether the container runtime should close the stdin channel after it has been opened by + /// a single attach. When stdin is true the stdin stream will remain open across multiple attach + /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, + /// at which time stdin is closed and remains closed until the container is restarted. If this + /// flag is false, a container processes that reads from stdin will never receive an EOF. + /// Default is false #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")] pub stdin_once: Option, - /// If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. - /// The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. + /// If set, the name of the container from PodSpec that this ephemeral container targets. + /// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + /// If not set then the ephemeral container uses the namespaces configured in the Pod spec. + /// + /// + /// The container runtime must implement support for this feature. If the runtime does not + /// support namespace targeting then the result of setting this field is undefined. #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetContainerName")] pub target_container_name: Option, - /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. 
+ /// Optional: Path at which the file to which the container's termination message + /// will be written is mounted into the container's filesystem. + /// Message written is intended to be brief final status, such as an assertion failure message. + /// Will be truncated by the node if greater than 4096 bytes. The total message length across + /// all containers will be limited to 12kb. + /// Defaults to /dev/termination-log. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")] pub termination_message_path: Option, - /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + /// Indicate how the termination message should be populated. File will use the contents of + /// terminationMessagePath to populate the container status message on both success and failure. + /// FallbackToLogsOnError will use the last chunk of container log output if the termination + /// message file is empty and the container exited with an error. + /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + /// Defaults to File. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")] pub termination_message_policy: Option, - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub tty: Option, /// volumeDevices is the list of block devices to be used by the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")] pub volume_devices: Option>, - /// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated. + /// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] pub volume_mounts: Option>, - /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + /// Container's working directory. + /// If not specified, the container runtime's default will be used, which + /// might be configured in the container image. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")] pub working_dir: Option, } @@ -1551,7 +2462,15 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainers { pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnv { /// Name of the environment variable. Must be a C_IDENTIFIER. pub name: String, - /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". + /// Variable references $(VAR_NAME) are expanded + /// using the previously defined environment variables in the container and + /// any service environment variables. If a variable cannot be resolved, + /// the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + /// Escaped references will never be expanded, regardless of whether the variable + /// exists or not. + /// Defaults to "". #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Source for the environment variable's value. Cannot be used if value is not empty. @@ -1565,10 +2484,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFrom { /// Selects a key of a ConfigMap. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")] pub config_map_key_ref: Option, - /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, /// Selects a key of a secret in the pod's namespace @@ -1581,7 +2502,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFrom { pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromConfigMapKeyRef { /// The key to select. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap or its key must be defined @@ -1589,7 +2512,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromConfigMapKe pub optional: Option, } -/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
+/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, +/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromFieldRef { /// Version of the schema the FieldPath is written in terms of, defaults to "v1". @@ -1600,7 +2524,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromFieldRef { pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -1618,7 +2543,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromResourceFie pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvValueFromSecretKeyRef { /// The key of the secret to select from. Must be a valid secret key. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret or its key must be defined @@ -1643,7 +2570,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvFrom { /// The ConfigMap to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvFromConfigMapRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap must be defined @@ -1654,7 +2583,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvFromConfigMapRef { /// The Secret to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvFromSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? 
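// Illustrative sketch only: the fieldRef/resourceFieldRef docs above enumerate which downward-API
// selectors are currently supported. This hypothetical validator just restates those lists so a
// misspelled fieldPath or resource name is caught before a manifest reaches the API server.
fn field_ref_is_supported(field_path: &str) -> bool {
    matches!(
        field_path,
        "metadata.name"
            | "metadata.namespace"
            | "spec.nodeName"
            | "spec.serviceAccountName"
            | "status.hostIP"
            | "status.podIP"
            | "status.podIPs"
    ) || field_path.starts_with("metadata.labels[")
        || field_path.starts_with("metadata.annotations[")
}

fn resource_field_ref_is_supported(resource: &str) -> bool {
    matches!(
        resource,
        "limits.cpu"
            | "limits.memory"
            | "limits.ephemeral-storage"
            | "requests.cpu"
            | "requests.memory"
            | "requests.ephemeral-storage"
    )
}

fn main() {
    assert!(field_ref_is_supported("status.podIP"));
    assert!(!field_ref_is_supported("status.phase"));
    assert!(resource_field_ref_is_supported("requests.memory"));
    assert!(!resource_field_ref_is_supported("requests.storage"));
}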
#[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret must be defined @@ -1665,15 +2596,29 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersEnvFromSecretRef { /// Lifecycle is not allowed for ephemeral containers. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecycle { - /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PostStart is called immediately after a container is created. If the handler fails, + /// the container is terminated and restarted according to its restart policy. + /// Other management of the container blocks until the hook completes. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")] pub post_start: Option, - /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PreStop is called immediately before a container is terminated due to an + /// API request or management event such as liveness/startup probe failure, + /// preemption, resource contention, etc. The handler is not called if the + /// container crashes or exits. The Pod's termination grace period countdown begins before the + /// PreStop hook is executed. Regardless of the outcome of the handler, the + /// container will eventually terminate within the Pod's termination grace + /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes + /// or until the termination grace period is reached. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")] pub pre_stop: Option, } -/// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PostStart is called immediately after a container is created. If the handler fails, +/// the container is terminated and restarted according to its restart policy. +/// Other management of the container blocks until the hook completes. 
+/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStart { /// Exec specifies the action to take. @@ -1685,7 +2630,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStart { /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1693,7 +2640,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStart { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1701,7 +2652,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartExec /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1710,9 +2662,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartHttpG /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. 
+ /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1720,7 +2675,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartHttpG /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1733,17 +2689,29 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartSleep pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. 
+/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStop { /// Exec specifies the action to take. @@ -1755,7 +2723,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStop { /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -1763,7 +2733,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStop { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1771,7 +2745,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopExec { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1780,9 +2755,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopHttpGet /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. 
+ /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1790,7 +2768,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopHttpGet /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1803,13 +2782,17 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopSleep { pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLifecyclePreStopTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -1819,7 +2802,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1828,22 +2812,36 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. 
Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1851,7 +2849,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1861,8 +2863,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeExec { pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1870,7 +2875,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1879,9 +2885,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -1889,7 +2898,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. 
+ /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -1901,26 +2911,35 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersLivenessProbeTcpSocket /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerPort represents a network port in a single container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersPorts { - /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + /// Number of port to expose on the pod's IP address. + /// This must be a valid port number, 0 < x < 65536. #[serde(rename = "containerPort")] pub container_port: i32, /// What host IP to bind the external port to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")] pub host_ip: Option, - /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + /// Number of port to expose on the host. + /// If specified, this must be a valid port number, 0 < x < 65536. + /// If HostNetwork is specified, this must match ContainerPort. + /// Most containers do not need this. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")] pub host_port: Option, - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + /// named port in a pod must have a unique name. Name for the port that can be + /// referred to by services. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + /// Protocol for port. Must be UDP, TCP, or SCTP. + /// Defaults to "TCP". #[serde(default, skip_serializing_if = "Option::is_none")] pub protocol: Option, } @@ -1931,7 +2950,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -1940,22 +2960,36 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. 
+ /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -1963,7 +2997,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -1973,8 +3011,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeExec { pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -1982,7 +3023,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -1991,9 +3033,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. 
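// Illustrative sketch only: the probe fields documented above (initialDelaySeconds,
// periodSeconds, failureThreshold, timeoutSeconds) combine into a rough upper bound on how long
// a broken container can go undetected. The defaults below are taken from the field docs; the
// formula is an approximation that ignores scheduling jitter and probe execution time.
#[derive(Clone, Copy)]
struct ProbeTiming {
    initial_delay_seconds: i32, // 0 when unset
    period_seconds: i32,        // "Default to 10 seconds. Minimum value is 1."
    failure_threshold: i32,     // "Defaults to 3. Minimum value is 1."
    timeout_seconds: i32,       // "Defaults to 1 second. Minimum value is 1."
}

impl Default for ProbeTiming {
    fn default() -> Self {
        Self {
            initial_delay_seconds: 0,
            period_seconds: 10,
            failure_threshold: 3,
            timeout_seconds: 1,
        }
    }
}

impl ProbeTiming {
    // Approximate worst-case seconds before the kubelet treats the probe as failed.
    fn worst_case_detection_seconds(&self) -> i32 {
        self.initial_delay_seconds
            + (self.failure_threshold - 1) * self.period_seconds
            + self.timeout_seconds
    }
}

fn main() {
    // With the documented defaults: 0 + 2 * 10 + 1 = 21 seconds until the probe is considered failed.
    assert_eq!(ProbeTiming::default().worst_case_detection_seconds(), 21);
}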
#[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2001,7 +3046,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2013,33 +3059,48 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersReadinessProbeTcpSocket /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerResizePolicy represents resource resize policy for the container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersResizePolicy { - /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + /// Name of the resource to which this resource resize policy applies. + /// Supported values: cpu, memory. #[serde(rename = "resourceName")] pub resource_name: String, - /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + /// Restart policy to apply when specified resource is resized. + /// If not specified, it defaults to NotRequired. #[serde(rename = "restartPolicy")] pub restart_policy: String, } -/// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. +/// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources +/// already allocated to the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. + /// Claims lists the names of resources, defined in spec.resourceClaims, + /// that are used by this container. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. It can only be set for containers. #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option>, - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. 
+ /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -2047,49 +3108,94 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersResources { /// ResourceClaim references one entry in PodSpec.ResourceClaims. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + /// Name must match the name of one entry in pod.spec.resourceClaims of + /// the Pod where this field is used. It makes that resource available + /// inside a container. pub name: String, } -/// Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. +/// Optional: SecurityContext defines the security options the ephemeral container should be run with. +/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContext { - /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + /// AllowPrivilegeEscalation controls whether a process can gain more + /// privileges than its parent process. This bool directly controls if + /// the no_new_privs flag will be set on the container process. + /// AllowPrivilegeEscalation is true always when the container is: + /// 1) run as Privileged + /// 2) has CAP_SYS_ADMIN + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] pub allow_privilege_escalation: Option, - /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + /// The capabilities to add/drop when running containers. + /// Defaults to the default set of capabilities granted by the container runtime. + /// Note that this field cannot be set when spec.os.name is windows. 
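// Illustrative sketch only: the resources docs above note that omitted Requests default to the
// explicitly specified Limits. This hypothetical helper shows that effective-request rule for a
// single resource name, using plain quantity strings rather than the generated quantity type.
use std::collections::BTreeMap;

fn effective_request(
    resource: &str,
    requests: &BTreeMap<String, String>,
    limits: &BTreeMap<String, String>,
) -> Option<String> {
    requests
        .get(resource)
        .or_else(|| limits.get(resource)) // fall back to the limit when no request is set
        .cloned()
}

fn main() {
    let limits = BTreeMap::from([("memory".to_string(), "512Mi".to_string())]);
    let requests = BTreeMap::new();
    // No explicit memory request, so the limit becomes the effective request.
    assert_eq!(effective_request("memory", &requests, &limits).as_deref(), Some("512Mi"));
    // Neither side specifies cpu, so the value is left implementation-defined.
    assert_eq!(effective_request("cpu", &requests, &limits), None);
}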
#[serde(default, skip_serializing_if = "Option::is_none")] pub capabilities: Option, - /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + /// Run container in privileged mode. + /// Processes in privileged containers are essentially equivalent to root on the host. + /// Defaults to false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, - /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + /// procMount denotes the type of proc mount to use for the containers. + /// The default is DefaultProcMount which uses the container runtime defaults for + /// readonly paths and masked paths. + /// This requires the ProcMountType feature flag to be enabled. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] pub proc_mount: Option, - /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + /// Whether this container has a read-only root filesystem. + /// Default is false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] pub read_only_root_filesystem: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. 
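// Illustrative sketch only: several SecurityContext fields above repeat the same precedence
// rule, namely that a value set on the container overrides the equivalent field on the
// PodSecurityContext. This hypothetical helper expresses that rule for runAsUser/runAsGroup.
struct EffectiveIds {
    run_as_user: Option<i64>,
    run_as_group: Option<i64>,
}

fn resolve_ids(
    pod_user: Option<i64>,
    pod_group: Option<i64>,
    container_user: Option<i64>,
    container_group: Option<i64>,
) -> EffectiveIds {
    EffectiveIds {
        // Container-level settings win; pod-level values are only a fallback.
        run_as_user: container_user.or(pod_user),
        run_as_group: container_group.or(pod_group),
    }
}

fn main() {
    let ids = resolve_ids(Some(1000), Some(1000), Some(2000), None);
    assert_eq!(ids.run_as_user, Some(2000));  // container overrides pod
    assert_eq!(ids.run_as_group, Some(1000)); // pod value used when the container leaves it unset
}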
#[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to the container. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by this container. If seccomp options are + /// provided at both the pod & container level, the container options + /// override the pod options. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options from the PodSecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. +/// The capabilities to add/drop when running containers. +/// Defaults to the default set of capabilities granted by the container runtime. 
+/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextCapabilities { /// Added capabilities @@ -2100,7 +3206,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextCapabili pub drop: Option>, } -/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to the container. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in PodSecurityContext. If set in both SecurityContext and +/// PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -2117,31 +3227,53 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextSeLinuxO pub user: Option, } -/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by this container. If seccomp options are +/// provided at both the pod & container level, the container options +/// override the pod options. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } -/// The Windows specific settings applied to all containers. 
If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options from the PodSecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } @@ -2152,7 +3284,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. 
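// --- Illustrative sketch, not part of the upstream patch ---
// A hardened container security context expressed against the SecurityContext
// struct generated above. Because every field is Option-wrapped with
// #[serde(default)], sparse camelCase YAML is enough. The `use` path and the
// serde_yaml dependency are assumptions.
use kube_custom_resources_rs::kueue_x_k8s_io::v1beta1::workloads::WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContext;

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
  drop: ["ALL"]
seccompProfile:
  type: RuntimeDefault
"#;
    let ctx: WorkloadPodSetsTemplateSpecEphemeralContainersSecurityContext =
        serde_yaml::from_str(yaml)?;
    // The camelCase keys map back onto snake_case fields via the serde renames.
    assert_eq!(ctx.run_as_non_root, Some(true));
    assert_eq!(
        ctx.seccomp_profile.as_ref().map(|p| p.r#type.as_str()),
        Some("RuntimeDefault")
    );
    Ok(())
}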
#[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -2161,22 +3294,36 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -2184,7 +3331,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2194,8 +3345,11 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeExec { pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -2203,7 +3357,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2212,9 +3367,12 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeHttpGet { /// Path to access on the HTTP server. 
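// --- Illustrative sketch, not part of the upstream patch ---
// A startup probe using the httpGet action types defined above. Field names in
// the YAML are the camelCase serde renames; `port` accepts either a number or
// an IANA_SVC_NAME string because it is an IntOrString. The `use` path and
// serde_yaml are assumptions.
use kube_custom_resources_rs::kueue_x_k8s_io::v1beta1::workloads::WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbe;

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
httpGet:
  path: /healthz
  port: 8080
  scheme: HTTP
failureThreshold: 30
periodSeconds: 10
"#;
    let probe: WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbe =
        serde_yaml::from_str(yaml)?;
    // 30 failures x a 10s period gives the container roughly 300s to start
    // before the kubelet treats the probe as failed.
    assert_eq!(probe.failure_threshold, Some(30));
    assert_eq!(probe.period_seconds, Some(10));
    Ok(())
}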
#[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2222,7 +3380,8 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2234,7 +3393,9 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersStartupProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -2251,26 +3412,36 @@ pub struct WorkloadPodSetsTemplateSpecEphemeralContainersVolumeDevices { /// VolumeMount describes a mounting of a Volume within a container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecEphemeralContainersVolumeMounts { - /// Path within the container at which the volume should be mounted. Must not contain ':'. + /// Path within the container at which the volume should be mounted. Must + /// not contain ':'. #[serde(rename = "mountPath")] pub mount_path: String, - /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + /// mountPropagation determines how mounts are propagated from the host + /// to container and the other way around. + /// When not set, MountPropagationNone is used. + /// This field is beta in 1.10. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] pub mount_propagation: Option, /// This must match the Name of a Volume. pub name: String, - /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + /// Mounted read-only if true, read-write otherwise (false or unspecified). + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + /// Path within the volume from which the container's volume should be mounted. + /// Defaults to "" (volume's root). 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] pub sub_path: Option, - /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + /// Expanded path within the volume from which the container's volume should be mounted. + /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + /// Defaults to "" (volume's root). + /// SubPathExpr and SubPath are mutually exclusive. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] pub sub_path_expr: Option, } -/// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. +/// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +/// pod's hosts file. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecHostAliases { /// Hostnames for the above IP address. @@ -2281,10 +3452,13 @@ pub struct WorkloadPodSetsTemplateSpecHostAliases { pub ip: Option, } -/// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +/// LocalObjectReference contains enough information to let you locate the +/// referenced object inside the same namespace. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecImagePullSecrets { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -2292,75 +3466,166 @@ pub struct WorkloadPodSetsTemplateSpecImagePullSecrets { /// A single application container that you want to run within a pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainers { - /// Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Arguments to the entrypoint. + /// The container image's CMD is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub args: Option>, - /// Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + /// Entrypoint array. Not executed within a shell. + /// The container image's ENTRYPOINT is used if this is not provided. + /// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + /// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + /// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + /// of whether the variable exists or not. Cannot be updated. + /// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, - /// List of environment variables to set in the container. Cannot be updated. + /// List of environment variables to set in the container. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub env: Option>, - /// List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + /// List of sources to populate environment variables in the container. + /// The keys defined within a source must be a C_IDENTIFIER. All invalid keys + /// will be reported as an event when the container is starting. When a key exists in multiple + /// sources, the value associated with the last source will take precedence. + /// Values defined by an Env with a duplicate key will take precedence. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, - /// Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + /// Container image name. + /// More info: https://kubernetes.io/docs/concepts/containers/images + /// This field is optional to allow higher level config management to default or override + /// container images in workload controllers like Deployments and StatefulSets. 
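// --- Illustrative sketch, not part of the upstream patch ---
// The $(VAR_NAME) expansion rules documented above are applied by Kubernetes,
// not by these bindings, but the escaping convention matters when authoring
// the `args`/`command` values: a doubled $$ survives as a literal $(VAR_NAME),
// while a single $ is expanded from the container's environment.
fn main() {
    let args = vec![
        "--pod-name=$(POD_NAME)".to_string(),  // expanded by Kubernetes at runtime
        "--template=$$(POD_NAME)".to_string(), // escaped: stays "$(POD_NAME)" literally
    ];
    println!("{:?}", args);
}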
#[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + /// Image pull policy. + /// One of Always, Never, IfNotPresent. + /// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images #[serde(default, skip_serializing_if = "Option::is_none", rename = "imagePullPolicy")] pub image_pull_policy: Option, - /// Actions that the management system should take in response to container lifecycle events. Cannot be updated. + /// Actions that the management system should take in response to container lifecycle events. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub lifecycle: Option, - /// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container liveness. + /// Container will be restarted if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] pub liveness_probe: Option, - /// Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + /// Name of the container specified as a DNS_LABEL. + /// Each container in a pod must have a unique name (DNS_LABEL). + /// Cannot be updated. pub name: String, - /// List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + /// List of ports to expose from the container. Not specifying a port here + /// DOES NOT prevent that port from being exposed. Any port which is + /// listening on the default "0.0.0.0" address inside a container will be + /// accessible from the network. + /// Modifying this array with strategic merge patch may corrupt the data. + /// For more information See https://github.com/kubernetes/kubernetes/issues/108255. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none")] pub ports: Option>, - /// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Periodic probe of container service readiness. + /// Container will be removed from service endpoints if the probe fails. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessProbe")] pub readiness_probe: Option, /// Resources resize policy for the container. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "resizePolicy")] pub resize_policy: Option>, - /// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Compute Resources required by this container. + /// Cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, - /// RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed. + /// RestartPolicy defines the restart behavior of individual containers in a pod. + /// This field may only be set for init containers, and the only allowed value is "Always". + /// For non-init containers or when this field is not specified, + /// the restart behavior is defined by the Pod's restart policy and the container type. + /// Setting the RestartPolicy as "Always" for the init container will have the following effect: + /// this init container will be continually restarted on + /// exit until all regular containers have terminated. Once all regular + /// containers have completed, all init containers with restartPolicy "Always" + /// will be shut down. This lifecycle differs from normal init containers and + /// is often referred to as a "sidecar" container. Although this init + /// container still starts in the init container sequence, it does not wait + /// for the container to complete before proceeding to the next init + /// container. Instead, the next init container starts immediately after this + /// init container is started, or after any startupProbe has successfully + /// completed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "restartPolicy")] pub restart_policy: Option, - /// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + /// SecurityContext defines the security options the container should be run with. + /// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + /// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityContext")] pub security_context: Option, - /// StartupProbe indicates that the Pod has successfully initialized. 
If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// StartupProbe indicates that the Pod has successfully initialized. + /// If specified, no other probes are executed until this completes successfully. + /// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + /// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + /// when it might take a long time to load data or warm a cache, than during steady-state operation. + /// This cannot be updated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] pub startup_probe: Option, - /// Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + /// Whether this container should allocate a buffer for stdin in the container runtime. If this + /// is not set, reads from stdin in the container will always result in EOF. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub stdin: Option, - /// Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + /// Whether the container runtime should close the stdin channel after it has been opened by + /// a single attach. When stdin is true the stdin stream will remain open across multiple attach + /// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + /// first client attaches to stdin, and then remains open and accepts data until the client disconnects, + /// at which time stdin is closed and remains closed until the container is restarted. If this + /// flag is false, a container processes that reads from stdin will never receive an EOF. + /// Default is false #[serde(default, skip_serializing_if = "Option::is_none", rename = "stdinOnce")] pub stdin_once: Option, - /// Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. + /// Optional: Path at which the file to which the container's termination message + /// will be written is mounted into the container's filesystem. 
+ /// Message written is intended to be brief final status, such as an assertion failure message. + /// Will be truncated by the node if greater than 4096 bytes. The total message length across + /// all containers will be limited to 12kb. + /// Defaults to /dev/termination-log. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePath")] pub termination_message_path: Option, - /// Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + /// Indicate how the termination message should be populated. File will use the contents of + /// terminationMessagePath to populate the container status message on both success and failure. + /// FallbackToLogsOnError will use the last chunk of container log output if the termination + /// message file is empty and the container exited with an error. + /// The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + /// Defaults to File. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationMessagePolicy")] pub termination_message_policy: Option, - /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + /// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + /// Default is false. #[serde(default, skip_serializing_if = "Option::is_none")] pub tty: Option, /// volumeDevices is the list of block devices to be used by the container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeDevices")] pub volume_devices: Option>, - /// Pod volumes to mount into the container's filesystem. Cannot be updated. + /// Pod volumes to mount into the container's filesystem. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMounts")] pub volume_mounts: Option>, - /// Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + /// Container's working directory. + /// If not specified, the container runtime's default will be used, which + /// might be configured in the container image. + /// Cannot be updated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "workingDir")] pub working_dir: Option, } @@ -2370,7 +3635,15 @@ pub struct WorkloadPodSetsTemplateSpecInitContainers { pub struct WorkloadPodSetsTemplateSpecInitContainersEnv { /// Name of the environment variable. Must be a C_IDENTIFIER. pub name: String, - /// Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". 
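// --- Illustrative sketch, not part of the upstream patch ---
// A "sidecar"-style init container as described by the restartPolicy docs
// above, deserialized into the generated struct. Only `name` is required;
// everything else is Option-wrapped. The `use` path and serde_yaml are
// assumptions, and the image reference is example data.
use kube_custom_resources_rs::kueue_x_k8s_io::v1beta1::workloads::WorkloadPodSetsTemplateSpecInitContainers;

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
name: log-shipper
image: registry.example.com/log-shipper:1.2.3
restartPolicy: Always
volumeMounts:
  - name: app-logs
    mountPath: /var/log/app
    readOnly: true
"#;
    let sidecar: WorkloadPodSetsTemplateSpecInitContainers = serde_yaml::from_str(yaml)?;
    // restartPolicy "Always" keeps this init container running alongside the
    // regular containers instead of blocking on its completion.
    assert_eq!(sidecar.restart_policy.as_deref(), Some("Always"));
    Ok(())
}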
+ /// Variable references $(VAR_NAME) are expanded + /// using the previously defined environment variables in the container and + /// any service environment variables. If a variable cannot be resolved, + /// the reference in the input string will be unchanged. Double $$ are reduced + /// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + /// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + /// Escaped references will never be expanded, regardless of whether the variable + /// exists or not. + /// Defaults to "". #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, /// Source for the environment variable's value. Cannot be used if value is not empty. @@ -2384,10 +3657,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFrom { /// Selects a key of a ConfigMap. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMapKeyRef")] pub config_map_key_ref: Option, - /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + /// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + /// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, /// Selects a key of a secret in the pod's namespace @@ -2400,7 +3675,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFrom { pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromConfigMapKeyRef { /// The key to select. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap or its key must be defined @@ -2408,7 +3685,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromConfigMapKeyRef pub optional: Option, } -/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. +/// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, +/// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. 
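// --- Illustrative sketch, not part of the upstream patch ---
// An environment variable fed by the downward-API fieldRef selector documented
// above, rather than by a literal `value`. The `use` path and serde_yaml are
// assumptions.
use kube_custom_resources_rs::kueue_x_k8s_io::v1beta1::workloads::WorkloadPodSetsTemplateSpecInitContainersEnv;

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
name: POD_NAME
valueFrom:
  fieldRef:
    fieldPath: metadata.name
"#;
    let env: WorkloadPodSetsTemplateSpecInitContainersEnv = serde_yaml::from_str(yaml)?;
    let path = env
        .value_from
        .and_then(|v| v.field_ref)
        .map(|f| f.field_path);
    assert_eq!(path.as_deref(), Some("metadata.name"));
    Ok(())
}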
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromFieldRef { /// Version of the schema the FieldPath is written in terms of, defaults to "v1". @@ -2419,7 +3697,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromFieldRef { pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -2437,7 +3716,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromResourceFieldRef pub struct WorkloadPodSetsTemplateSpecInitContainersEnvValueFromSecretKeyRef { /// The key of the secret to select from. Must be a valid secret key. pub key: String, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret or its key must be defined @@ -2462,7 +3743,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvFrom { /// The ConfigMap to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersEnvFromConfigMapRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the ConfigMap must be defined @@ -2473,7 +3756,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvFromConfigMapRef { /// The Secret to select from #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersEnvFromSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// Specify whether the Secret must be defined @@ -2481,18 +3766,33 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersEnvFromSecretRef { pub optional: Option, } -/// Actions that the management system should take in response to container lifecycle events. Cannot be updated. +/// Actions that the management system should take in response to container lifecycle events. 
+/// Cannot be updated. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecycle { - /// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PostStart is called immediately after a container is created. If the handler fails, + /// the container is terminated and restarted according to its restart policy. + /// Other management of the container blocks until the hook completes. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "postStart")] pub post_start: Option, - /// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + /// PreStop is called immediately before a container is terminated due to an + /// API request or management event such as liveness/startup probe failure, + /// preemption, resource contention, etc. The handler is not called if the + /// container crashes or exits. The Pod's termination grace period countdown begins before the + /// PreStop hook is executed. Regardless of the outcome of the handler, the + /// container will eventually terminate within the Pod's termination grace + /// period (unless delayed by finalizers). Other management of the container blocks until the hook completes + /// or until the termination grace period is reached. + /// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[serde(default, skip_serializing_if = "Option::is_none", rename = "preStop")] pub pre_stop: Option, } -/// PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PostStart is called immediately after a container is created. If the handler fails, +/// the container is terminated and restarted according to its restart policy. +/// Other management of the container blocks until the hook completes. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStart { /// Exec specifies the action to take. @@ -2504,7 +3804,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStart { /// Sleep represents the duration that the container should sleep before being terminated. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -2512,7 +3814,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStart { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2520,7 +3826,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartExec { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2529,9 +3836,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2539,7 +3849,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2552,17 +3863,29 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartSleep { pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePostStartTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks +/// PreStop is called immediately before a container is terminated due to an +/// API request or management event such as liveness/startup probe failure, +/// preemption, resource contention, etc. The handler is not called if the +/// container crashes or exits. The Pod's termination grace period countdown begins before the +/// PreStop hook is executed. Regardless of the outcome of the handler, the +/// container will eventually terminate within the Pod's termination grace +/// period (unless delayed by finalizers). Other management of the container blocks until the hook completes +/// or until the termination grace period is reached. +/// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStop { /// Exec specifies the action to take. 
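// --- Illustrative sketch, not part of the upstream patch ---
// A preStop hook using the exec handler, matching the lifecycle semantics
// documented above. The `use` path and serde_yaml are assumptions.
use kube_custom_resources_rs::kueue_x_k8s_io::v1beta1::workloads::WorkloadPodSetsTemplateSpecInitContainersLifecycle;

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
preStop:
  exec:
    command: ["/bin/sh", "-c", "nginx -s quit; sleep 5"]
"#;
    let lifecycle: WorkloadPodSetsTemplateSpecInitContainersLifecycle =
        serde_yaml::from_str(yaml)?;
    // The hook runs before termination and the pod's grace period countdown has
    // already started, so keep the command comfortably shorter than that period.
    assert!(lifecycle.pre_stop.and_then(|p| p.exec).is_some());
    Ok(())
}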
@@ -2574,7 +3897,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStop { /// Sleep represents the duration that the container should sleep before being terminated. #[serde(default, skip_serializing_if = "Option::is_none")] pub sleep: Option, - /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + /// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + /// for the backward compatibility. There are no validation of this field and + /// lifecycle hooks will fail in runtime when tcp handler is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, } @@ -2582,7 +3907,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStop { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2590,7 +3919,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopExec { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2599,9 +3929,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. 
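// Illustrative sketch, not part of the generated code: per the PreStop doc
// comment above, the termination grace period countdown starts before the hook
// runs, so a slow hook consumes time the main process would otherwise have
// between SIGTERM and SIGKILL. The helper is hypothetical and only shows that
// arithmetic.
fn shutdown_budget_after_pre_stop(
    termination_grace_period_seconds: i64,
    expected_pre_stop_seconds: i64,
) -> i64 {
    // A non-positive result means the hook alone can exhaust the grace period.
    termination_grace_period_seconds - expected_pre_stop_seconds
}
// Example: a 25s connection-drain hook under a 30s grace period leaves about
// 5s for the container's own shutdown.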
#[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2609,7 +3942,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2622,23 +3956,31 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopSleep { pub seconds: i64, } -/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. +/// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept +/// for the backward compatibility. There are no validation of this field and +/// lifecycle hooks will fail in runtime when tcp handler is specified. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLifecyclePreStopTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } -/// Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container liveness. +/// Container will be restarted if the probe fails. +/// Cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -2647,22 +3989,36 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. 
+ /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. 
+ /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -2670,7 +4026,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2680,8 +4040,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeExec { pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -2689,7 +4052,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2698,9 +4062,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. 
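// Illustrative sketch, not part of the generated code: combining the liveness
// probe fields documented above, the worst-case delay between a container
// becoming unhealthy and the kubelet restarting it is roughly the initial
// delay plus failureThreshold consecutive failed periods, with each attempt
// allowed up to timeoutSeconds. A rough estimate, not kubelet source.
fn worst_case_liveness_detection_seconds(
    initial_delay_seconds: i32,
    period_seconds: i32,
    timeout_seconds: i32,
    failure_threshold: i32,
) -> i32 {
    initial_delay_seconds + failure_threshold * period_seconds + timeout_seconds
}
// With the documented defaults (periodSeconds 10, timeoutSeconds 1,
// failureThreshold 3) and no initial delay, that is on the order of 30 seconds.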
#[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2708,7 +4075,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2720,37 +4088,50 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersLivenessProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerPort represents a network port in a single container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersPorts { - /// Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + /// Number of port to expose on the pod's IP address. + /// This must be a valid port number, 0 < x < 65536. #[serde(rename = "containerPort")] pub container_port: i32, /// What host IP to bind the external port to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostIP")] pub host_ip: Option, - /// Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + /// Number of port to expose on the host. + /// If specified, this must be a valid port number, 0 < x < 65536. + /// If HostNetwork is specified, this must match ContainerPort. + /// Most containers do not need this. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPort")] pub host_port: Option, - /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + /// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + /// named port in a pod must have a unique name. Name for the port that can be + /// referred to by services. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + /// Protocol for port. Must be UDP, TCP, or SCTP. + /// Defaults to "TCP". #[serde(default, skip_serializing_if = "Option::is_none")] pub protocol: Option, } -/// Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// Periodic probe of container service readiness. +/// Container will be removed from service endpoints if the probe fails. +/// Cannot be updated. 
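// Illustrative sketch, not part of the generated code: the ContainerPort doc
// comments above require ports in the range 1..65535 and, when host networking
// is in play, a hostPort equal to containerPort. The check below only restates
// those documented rules; the `host_network` flag is an assumed input, not a
// field of this struct.
fn container_port_ok(container_port: i32, host_port: Option<i32>, host_network: bool) -> bool {
    let in_range = |p: i32| p > 0 && p < 65536;
    in_range(container_port)
        && host_port.map_or(true, |hp| {
            in_range(hp) && (!host_network || hp == container_port)
        })
}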
+/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -2759,22 +4140,36 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. 
+ /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -2782,7 +4177,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -2792,8 +4191,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeExec { pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -2801,7 +4203,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. 
You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -2810,9 +4213,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -2820,7 +4226,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. pub name: String, /// The header field value pub value: String, @@ -2832,33 +4239,49 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersReadinessProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } /// ContainerResizePolicy represents resource resize policy for the container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersResizePolicy { - /// Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + /// Name of the resource to which this resource resize policy applies. + /// Supported values: cpu, memory. #[serde(rename = "resourceName")] pub resource_name: String, - /// Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + /// Restart policy to apply when specified resource is resized. + /// If not specified, it defaults to NotRequired. #[serde(rename = "restartPolicy")] pub restart_policy: String, } -/// Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +/// Compute Resources required by this container. +/// Cannot be updated. 
+/// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersResources { - /// Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - /// This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - /// This field is immutable. It can only be set for containers. + /// Claims lists the names of resources, defined in spec.resourceClaims, + /// that are used by this container. + /// + /// + /// This is an alpha field and requires enabling the + /// DynamicResourceAllocation feature gate. + /// + /// + /// This field is immutable. It can only be set for containers. #[serde(default, skip_serializing_if = "Option::is_none")] pub claims: Option>, - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -2866,49 +4289,95 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersResources { /// ResourceClaim references one entry in PodSpec.ResourceClaims. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersResourcesClaims { - /// Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + /// Name must match the name of one entry in pod.spec.resourceClaims of + /// the Pod where this field is used. It makes that resource available + /// inside a container. pub name: String, } -/// SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +/// SecurityContext defines the security options the container should be run with. +/// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. +/// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContext { - /// AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
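// Illustrative sketch, not part of the generated code: the Resources doc
// comments above say requests default to limits when omitted and must never
// exceed them. Plain integer quantities (millicores, bytes) stand in for real
// Kubernetes quantity strings such as "500m" or "1Gi", whose parsing is
// deliberately skipped here.
use std::collections::BTreeMap;

fn effective_requests(
    requests: &BTreeMap<String, u64>,
    limits: &BTreeMap<String, u64>,
) -> Result<BTreeMap<String, u64>, String> {
    let mut out = limits.clone(); // omitted requests fall back to the explicit limit
    for (resource, req) in requests {
        if let Some(lim) = limits.get(resource) {
            if req > lim {
                return Err(format!("request for {resource} exceeds its limit"));
            }
        }
        out.insert(resource.clone(), *req);
    }
    Ok(out)
}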
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + /// AllowPrivilegeEscalation controls whether a process can gain more + /// privileges than its parent process. This bool directly controls if + /// the no_new_privs flag will be set on the container process. + /// AllowPrivilegeEscalation is true always when the container is: + /// 1) run as Privileged + /// 2) has CAP_SYS_ADMIN + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowPrivilegeEscalation")] pub allow_privilege_escalation: Option, - /// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + /// The capabilities to add/drop when running containers. + /// Defaults to the default set of capabilities granted by the container runtime. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub capabilities: Option, - /// Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + /// Run container in privileged mode. + /// Processes in privileged containers are essentially equivalent to root on the host. + /// Defaults to false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub privileged: Option, - /// procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + /// procMount denotes the type of proc mount to use for the containers. + /// The default is DefaultProcMount which uses the container runtime defaults for + /// readonly paths and masked paths. + /// This requires the ProcMountType feature flag to be enabled. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "procMount")] pub proc_mount: Option, - /// Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + /// Whether this container has a read-only root filesystem. + /// Default is false. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnlyRootFilesystem")] pub read_only_root_filesystem: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in PodSecurityContext. 
If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to the container. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by this container. If seccomp options are + /// provided at both the pod & container level, the container options + /// override the pod options. + /// Note that this field cannot be set when spec.os.name is windows. 
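// Illustrative sketch, not part of the generated code: the SecurityContext doc
// comments above describe per-container overrides of the pod-level settings.
// The struct below is a simplified, hypothetical stand-in (not the kopium
// type) used only to show a commonly recommended restrictive combination of
// those fields.
#[derive(Debug, Clone)]
struct RestrictiveContainerSecurityContext {
    allow_privilege_escalation: bool,
    read_only_root_filesystem: bool,
    run_as_non_root: bool,
    drop_capabilities: Vec<String>,
    seccomp_profile_type: String,
}

fn restrictive_defaults() -> RestrictiveContainerSecurityContext {
    RestrictiveContainerSecurityContext {
        allow_privilege_escalation: false,
        read_only_root_filesystem: true,
        run_as_non_root: true,
        drop_capabilities: vec!["ALL".to_string()],
        seccomp_profile_type: "RuntimeDefault".to_string(),
    }
}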
#[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options from the PodSecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. +/// The capabilities to add/drop when running containers. +/// Defaults to the default set of capabilities granted by the container runtime. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextCapabilities { /// Added capabilities @@ -2919,7 +4388,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextCapabilities pub drop: Option>, } -/// The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to the container. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in PodSecurityContext. If set in both SecurityContext and +/// PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -2936,42 +4409,71 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextSeLinuxOption pub user: Option, } -/// The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by this container. If seccomp options are +/// provided at both the pod & container level, the container options +/// override the pod options. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". 
Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options from the PodSecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes +/// StartupProbe indicates that the Pod has successfully initialized. +/// If specified, no other probes are executed until this completes successfully. +/// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. +/// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, +/// when it might take a long time to load data or warm a cache, than during steady-state operation. +/// This cannot be updated. +/// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbe { /// Exec specifies the action to take. #[serde(default, skip_serializing_if = "Option::is_none")] pub exec: Option, - /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. + /// Defaults to 3. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] pub failure_threshold: Option, /// GRPC specifies an action involving a GRPC port. @@ -2980,22 +4482,36 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbe { /// HTTPGet specifies the http request to perform. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] pub http_get: Option, - /// Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after the container has started before liveness probes are initiated. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] pub initial_delay_seconds: Option, - /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + /// How often (in seconds) to perform the probe. + /// Default to 10 seconds. Minimum value is 1. 
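// Illustrative sketch, not part of the generated code: the StartupProbe doc
// comment above explains that it shields slow-starting containers from the
// liveness probe. The usual sizing rule is that the allowed start-up window is
// about failureThreshold * periodSeconds; this is a rule of thumb, not kubelet
// source.
fn startup_window_seconds(failure_threshold: i32, period_seconds: i32) -> i32 {
    failure_threshold * period_seconds
}
// Example: failureThreshold 30 with periodSeconds 10 tolerates roughly a
// five-minute start-up before the container is restarted.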
#[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] pub period_seconds: Option, - /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + /// Minimum consecutive successes for the probe to be considered successful after having failed. + /// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] pub success_threshold: Option, /// TCPSocket specifies an action involving a TCP port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tcpSocket")] pub tcp_socket: Option, - /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + /// Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + /// The grace period is the duration in seconds after the processes running in the pod are sent + /// a termination signal and the time when the processes are forcibly halted with a kill signal. + /// Set this value longer than the expected cleanup time for your process. + /// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + /// value overrides the value provided by the pod spec. + /// Value must be non-negative integer. The value zero indicates stop immediately via + /// the kill signal (no opportunity to shut down). + /// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + /// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationGracePeriodSeconds")] pub termination_grace_period_seconds: Option, - /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + /// Number of seconds after which the probe times out. + /// Defaults to 1 second. Minimum value is 1. + /// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] pub timeout_seconds: Option, } @@ -3003,7 +4519,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbe { /// Exec specifies the action to take. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeExec { - /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + /// Command is the command line to execute inside the container, the working directory for the + /// command is root ('/') in the container's filesystem. The command is simply exec'd, it is + /// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + /// a shell, you need to explicitly call out to that shell. + /// Exit status of 0 is treated as live/healthy and non-zero is unhealthy. #[serde(default, skip_serializing_if = "Option::is_none")] pub command: Option>, } @@ -3013,8 +4533,11 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeExec { pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeGrpc { /// Port number of the gRPC service. Number must be in the range 1 to 65535. pub port: i32, - /// Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - /// If this is not specified, the default behavior is defined by gRPC. + /// Service is the name of the service to place in the gRPC HealthCheckRequest + /// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + /// + /// + /// If this is not specified, the default behavior is defined by gRPC. #[serde(default, skip_serializing_if = "Option::is_none")] pub service: Option, } @@ -3022,7 +4545,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeGrpc { /// HTTPGet specifies the http request to perform. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeHttpGet { - /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + /// Host name to connect to, defaults to the pod IP. You probably want to set + /// "Host" in httpHeaders instead. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, /// Custom headers to set in the request. HTTP allows repeated headers. @@ -3031,9 +4555,12 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeHttpGet { /// Path to access on the HTTP server. #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Name or number of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, - /// Scheme to use for connecting to the host. Defaults to HTTP. + /// Scheme to use for connecting to the host. + /// Defaults to HTTP. #[serde(default, skip_serializing_if = "Option::is_none")] pub scheme: Option, } @@ -3041,7 +4568,8 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeHttpGet { /// HTTPHeader describes a custom header to be used in HTTP probes #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeHttpGetHttpHeaders { - /// The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + /// The header field name. + /// This will be canonicalized upon output, so case-variant names will be understood as the same header. 
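// Illustrative sketch, not part of the generated code: pulling together the
// HTTPGet probe fields documented above (scheme defaulting to HTTP, host
// defaulting to the pod IP, a numeric port, and a path), a probe URL can be
// assembled roughly as below. The "/" fallback for an empty path and the
// numeric-only port are simplifying assumptions; the kubelet's real request
// handling is more involved.
fn probe_url(
    scheme: Option<&str>,
    host: Option<&str>,
    pod_ip: &str,
    port: u16,
    path: Option<&str>,
) -> String {
    let scheme = scheme.unwrap_or("HTTP").to_ascii_lowercase();
    let host = host.unwrap_or(pod_ip);
    let path = path.unwrap_or("/");
    format!("{scheme}://{host}:{port}{path}")
}
// probe_url(None, None, "10.0.0.12", 8080, Some("/healthz")) yields
// "http://10.0.0.12:8080/healthz".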
pub name: String, /// The header field value pub value: String, @@ -3053,7 +4581,9 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersStartupProbeTcpSocket { /// Optional: Host name to connect to, defaults to the pod IP. #[serde(default, skip_serializing_if = "Option::is_none")] pub host: Option, - /// Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + /// Number or name of the port to access on the container. + /// Number must be in the range 1 to 65535. + /// Name must be an IANA_SVC_NAME. pub port: IntOrString, } @@ -3070,31 +4600,70 @@ pub struct WorkloadPodSetsTemplateSpecInitContainersVolumeDevices { /// VolumeMount describes a mounting of a Volume within a container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecInitContainersVolumeMounts { - /// Path within the container at which the volume should be mounted. Must not contain ':'. + /// Path within the container at which the volume should be mounted. Must + /// not contain ':'. #[serde(rename = "mountPath")] pub mount_path: String, - /// mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + /// mountPropagation determines how mounts are propagated from the host + /// to container and the other way around. + /// When not set, MountPropagationNone is used. + /// This field is beta in 1.10. #[serde(default, skip_serializing_if = "Option::is_none", rename = "mountPropagation")] pub mount_propagation: Option, /// This must match the Name of a Volume. pub name: String, - /// Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + /// Mounted read-only if true, read-write otherwise (false or unspecified). + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + /// Path within the volume from which the container's volume should be mounted. + /// Defaults to "" (volume's root). #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPath")] pub sub_path: Option, - /// Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + /// Expanded path within the volume from which the container's volume should be mounted. + /// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + /// Defaults to "" (volume's root). + /// SubPathExpr and SubPath are mutually exclusive. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subPathExpr")] pub sub_path_expr: Option, } -/// Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. 
-/// If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions -/// If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup +/// Specifies the OS of the containers in the pod. +/// Some pod and container fields are restricted if this is set. +/// +/// +/// If the OS field is set to linux, the following fields must be unset: +/// -securityContext.windowsOptions +/// +/// +/// If the OS field is set to windows, following fields must be unset: +/// - spec.hostPID +/// - spec.hostIPC +/// - spec.hostUsers +/// - spec.securityContext.seLinuxOptions +/// - spec.securityContext.seccompProfile +/// - spec.securityContext.fsGroup +/// - spec.securityContext.fsGroupChangePolicy +/// - spec.securityContext.sysctls +/// - spec.shareProcessNamespace +/// - spec.securityContext.runAsUser +/// - spec.securityContext.runAsGroup +/// - spec.securityContext.supplementalGroups +/// - spec.containers[*].securityContext.seLinuxOptions +/// - spec.containers[*].securityContext.seccompProfile +/// - spec.containers[*].securityContext.capabilities +/// - spec.containers[*].securityContext.readOnlyRootFilesystem +/// - spec.containers[*].securityContext.privileged +/// - spec.containers[*].securityContext.allowPrivilegeEscalation +/// - spec.containers[*].securityContext.procMount +/// - spec.containers[*].securityContext.runAsUser +/// - spec.containers[*].securityContext.runAsGroup #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecOs { - /// Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null + /// Name is the name of the operating system. The currently supported values are linux and windows. + /// Additional value may be defined in future and can be one of: + /// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + /// Clients should expect to handle additional values and treat unrecognized values in this field as os: null pub name: String, } @@ -3106,10 +4675,13 @@ pub struct WorkloadPodSetsTemplateSpecReadinessGates { pub condition_type: String, } -/// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name. 
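As an illustrative aside: the long list above of fields that must stay unset for a given os.name is enforced by the Kubernetes API server, not by these generated types. A small sketch of the shape of that rule for the windows case, written against plain booleans (all names here are hypothetical, covering only a subset of the listed fields):

    /// Illustrative only: mirrors a subset of the documented rule that, when
    /// spec.os.name is "windows", Linux-only security fields must be unset.
    fn windows_os_violations(
        os_name: &str,
        has_se_linux_options: bool,
        has_seccomp_profile: bool,
        has_run_as_user: bool,
    ) -> Vec<&'static str> {
        let mut violations = Vec::new();
        if os_name == "windows" {
            if has_se_linux_options {
                violations.push("spec.securityContext.seLinuxOptions");
            }
            if has_seccomp_profile {
                violations.push("spec.securityContext.seccompProfile");
            }
            if has_run_as_user {
                violations.push("spec.securityContext.runAsUser");
            }
        }
        violations
    }

    fn main() {
        // A windows pod that still sets runAsUser would be rejected upstream.
        let violations = windows_os_violations("windows", false, false, true);
        assert_eq!(violations, vec!["spec.securityContext.runAsUser"]);
    }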
+/// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +/// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +/// Containers that need access to the ResourceClaim reference it with this name. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecResourceClaims { - /// Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL. + /// Name uniquely identifies this resource claim inside the pod. + /// This must be a DNS_LABEL. pub name: String, /// Source describes where to find the ResourceClaim. #[serde(default, skip_serializing_if = "Option::is_none")] @@ -3119,12 +4691,24 @@ pub struct WorkloadPodSetsTemplateSpecResourceClaims { /// Source describes where to find the ResourceClaim. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecResourceClaimsSource { - /// ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod. + /// ResourceClaimName is the name of a ResourceClaim object in the same + /// namespace as this pod. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimName")] pub resource_claim_name: Option, - /// ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. - /// The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - /// This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. + /// ResourceClaimTemplateName is the name of a ResourceClaimTemplate + /// object in the same namespace as this pod. + /// + /// + /// The template will be used to create a new ResourceClaim, which will + /// be bound to this pod. When this pod is deleted, the ResourceClaim + /// will also be deleted. The pod name and resource name, along with a + /// generated component, will be used to form a unique name for the + /// ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + /// + /// + /// This field is immutable and no changes will be made to the + /// corresponding ResourceClaim by the control plane after creating the + /// ResourceClaim. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceClaimTemplateName")] pub resource_claim_template_name: Option, } @@ -3132,48 +4716,102 @@ pub struct WorkloadPodSetsTemplateSpecResourceClaimsSource { /// PodSchedulingGate is associated to a Pod to guard its scheduling. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecSchedulingGates { - /// Name of the scheduling gate. Each scheduling gate must have a unique name field. + /// Name of the scheduling gate. + /// Each scheduling gate must have a unique name field. pub name: String, } -/// SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. +/// SecurityContext holds pod-level security attributes and common container settings. +/// Optional: Defaults to empty. See type description for default values of each field. 
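As an illustrative aside: per the ClaimSource doc comments above, resourceClaimName and resourceClaimTemplateName are alternative ways of pointing at a ResourceClaim, and the upstream API expects only one of them to be used at a time. A minimal sketch of picking the effective reference, using a hypothetical mirror of the generated struct rather than the crate type:

    /// Hypothetical mirror of the generated ClaimSource type shown above.
    #[derive(Debug, Default)]
    struct ResourceClaimSourceSketch {
        resource_claim_name: Option<String>,
        resource_claim_template_name: Option<String>,
    }

    /// Returns the effective reference, or None when neither or both are set.
    fn effective_claim(source: &ResourceClaimSourceSketch) -> Option<&str> {
        match (
            source.resource_claim_name.as_deref(),
            source.resource_claim_template_name.as_deref(),
        ) {
            (Some(name), None) => Some(name),
            (None, Some(template)) => Some(template),
            _ => None,
        }
    }

    fn main() {
        let source = ResourceClaimSourceSketch {
            resource_claim_name: Some("shared-gpu".to_string()),
            ..Default::default()
        };
        assert_eq!(effective_claim(&source), Some("shared-gpu"));
    }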
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecSecurityContext { - /// A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: - /// 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- - /// If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. + /// A special supplemental group that applies to all containers in a pod. + /// Some volume types allow the Kubelet to change the ownership of that volume + /// to be owned by the pod: + /// + /// + /// 1. The owning GID will be the FSGroup + /// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + /// 3. The permission bits are OR'd with rw-rw---- + /// + /// + /// If unset, the Kubelet will not modify the ownership and permissions of any volume. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroup")] pub fs_group: Option, - /// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows. + /// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + /// before being exposed inside Pod. This field will only apply to + /// volume types which support fsGroup based ownership(and permissions). + /// It will have no effect on ephemeral volume types such as: secret, configmaps + /// and emptydir. + /// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsGroupChangePolicy")] pub fs_group_change_policy: Option, - /// The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + /// The GID to run the entrypoint of the container process. + /// Uses runtime default if unset. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence + /// for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsGroup")] pub run_as_group: Option, - /// Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Indicates that the container must run as a non-root user. + /// If true, the Kubelet will validate the image at runtime to ensure that it + /// does not run as UID 0 (root) and fail to start the container if it does. + /// If unset or false, no such validation will be performed. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsNonRoot")] pub run_as_non_root: Option, - /// The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + /// The UID to run the entrypoint of the container process. + /// Defaults to user specified in image metadata if unspecified. + /// May also be set in SecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence + /// for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUser")] pub run_as_user: Option, - /// The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + /// The SELinux context to be applied to all containers. + /// If unspecified, the container runtime will allocate a random SELinux context for each + /// container. May also be set in SecurityContext. If set in + /// both SecurityContext and PodSecurityContext, the value specified in SecurityContext + /// takes precedence for that container. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seLinuxOptions")] pub se_linux_options: Option, - /// The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + /// The seccomp options to use by the containers in this pod. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "seccompProfile")] pub seccomp_profile: Option, - /// A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. 
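As an illustrative aside: the pod-level securityContext fields above all carry camelCase renames, so a manifest fragment deserializes directly into snake_case Rust fields. A short sketch against a hypothetical partial mirror (only a few of the documented fields), again assuming serde_json next to the existing serde dependency:

    use serde::Deserialize;

    // Hypothetical partial mirror of the generated PodSecurityContext type;
    // only a handful of the fields documented above are included.
    #[derive(Deserialize, Debug)]
    struct PodSecurityContextSketch {
        #[serde(default, rename = "fsGroup")]
        fs_group: Option<i64>,
        #[serde(default, rename = "runAsNonRoot")]
        run_as_non_root: Option<bool>,
        #[serde(default, rename = "runAsUser")]
        run_as_user: Option<i64>,
    }

    fn main() {
        let json = r#"{"fsGroup": 2000, "runAsNonRoot": true, "runAsUser": 1000}"#;
        let ctx: PodSecurityContextSketch =
            serde_json::from_str(json).expect("valid securityContext");
        assert_eq!(ctx.fs_group, Some(2000));
        assert_eq!(ctx.run_as_non_root, Some(true));
        assert_eq!(ctx.run_as_user, Some(1000));
    }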
+ /// A list of groups applied to the first process run in each container, in addition + /// to the container's primary GID, the fsGroup (if specified), and group memberships + /// defined in the container image for the uid of the container process. If unspecified, + /// no additional groups are added to any container. Note that group memberships + /// defined in the container image for the uid of the container process are still effective, + /// even if they are not included in this list. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none", rename = "supplementalGroups")] pub supplemental_groups: Option>, - /// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + /// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + /// sysctls (by the container runtime) might fail to launch. + /// Note that this field cannot be set when spec.os.name is windows. #[serde(default, skip_serializing_if = "Option::is_none")] pub sysctls: Option>, - /// The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + /// The Windows specific settings applied to all containers. + /// If unspecified, the options within a container's SecurityContext will be used. + /// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// Note that this field cannot be set when spec.os.name is linux. #[serde(default, skip_serializing_if = "Option::is_none", rename = "windowsOptions")] pub windows_options: Option, } -/// The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. +/// The SELinux context to be applied to all containers. +/// If unspecified, the container runtime will allocate a random SELinux context for each +/// container. May also be set in SecurityContext. If set in +/// both SecurityContext and PodSecurityContext, the value specified in SecurityContext +/// takes precedence for that container. +/// Note that this field cannot be set when spec.os.name is windows. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecSecurityContextSeLinuxOptions { /// Level is SELinux level label that applies to the container. @@ -3190,14 +4828,23 @@ pub struct WorkloadPodSetsTemplateSpecSecurityContextSeLinuxOptions { pub user: Option, } -/// The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. +/// The seccomp options to use by the containers in this pod. +/// Note that this field cannot be set when spec.os.name is windows. 
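As an illustrative aside: the seccompProfile docs above tie localhostProfile to the "Localhost" type (it must be set there and must not be set for any other type). A small check of that invariant, independent of the generated types:

    /// Illustrative check of the documented seccompProfile rule:
    /// localhostProfile must be set iff type is "Localhost".
    fn seccomp_profile_is_consistent(profile_type: &str, localhost_profile: Option<&str>) -> bool {
        match profile_type {
            "Localhost" => localhost_profile.is_some(),
            "RuntimeDefault" | "Unconfined" => localhost_profile.is_none(),
            _ => false, // unknown type
        }
    }

    fn main() {
        assert!(seccomp_profile_is_consistent("Localhost", Some("profiles/audit.json")));
        assert!(seccomp_profile_is_consistent("RuntimeDefault", None));
        assert!(!seccomp_profile_is_consistent("Localhost", None));
    }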
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecSecurityContextSeccompProfile { - /// localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + /// localhostProfile indicates a profile defined in a file on the node should be used. + /// The profile must be preconfigured on the node to work. + /// Must be a descending path, relative to the kubelet's configured seccomp profile location. + /// Must be set if type is "Localhost". Must NOT be set for any other type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "localhostProfile")] pub localhost_profile: Option, - /// type indicates which kind of seccomp profile will be applied. Valid options are: - /// Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. + /// type indicates which kind of seccomp profile will be applied. + /// Valid options are: + /// + /// + /// Localhost - a profile defined in a file on the node should be used. + /// RuntimeDefault - the container runtime default profile should be used. + /// Unconfined - no profile should be applied. #[serde(rename = "type")] pub r#type: String, } @@ -3211,39 +4858,60 @@ pub struct WorkloadPodSetsTemplateSpecSecurityContextSysctls { pub value: String, } -/// The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. +/// The Windows specific settings applied to all containers. +/// If unspecified, the options within a container's SecurityContext will be used. +/// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. +/// Note that this field cannot be set when spec.os.name is linux. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecSecurityContextWindowsOptions { - /// GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + /// GMSACredentialSpec is where the GMSA admission webhook + /// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + /// GMSA credential spec named by the GMSACredentialSpecName field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpec")] pub gmsa_credential_spec: Option, /// GMSACredentialSpecName is the name of the GMSA credential spec to use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gmsaCredentialSpecName")] pub gmsa_credential_spec_name: Option, - /// HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. 
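As an illustrative aside: the hostProcess docs above require that all containers in a pod agree on the effective HostProcess value and that hostNetwork is also true when it is enabled. A tiny sketch of that pod-level check, using plain booleans instead of the generated structs:

    /// Illustrative: validates the documented HostProcess constraints for a pod.
    /// `container_host_process` holds each container's effective hostProcess value.
    fn host_process_config_is_valid(container_host_process: &[bool], host_network: bool) -> bool {
        let Some(&first) = container_host_process.first() else {
            return true; // no containers, nothing to check
        };
        // All containers must share the same effective value...
        if !container_host_process.iter().all(|&v| v == first) {
            return false;
        }
        // ...and hostProcess pods additionally require hostNetwork.
        !first || host_network
    }

    fn main() {
        assert!(host_process_config_is_valid(&[true, true], true));
        assert!(!host_process_config_is_valid(&[true, false], true)); // mixed values
        assert!(!host_process_config_is_valid(&[true, true], false)); // missing hostNetwork
    }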
+ /// HostProcess determines if a container should be run as a 'Host Process' container. + /// All of a Pod's containers must have the same effective HostProcess value + /// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + /// In addition, if HostProcess is true then HostNetwork must also be set to true. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostProcess")] pub host_process: Option, - /// The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + /// The UserName in Windows to run the entrypoint of the container process. + /// Defaults to the user specified in image metadata if unspecified. + /// May also be set in PodSecurityContext. If set in both SecurityContext and + /// PodSecurityContext, the value specified in SecurityContext takes precedence. #[serde(default, skip_serializing_if = "Option::is_none", rename = "runAsUserName")] pub run_as_user_name: Option, } -/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +/// The pod this Toleration is attached to tolerates any taint that matches +/// the triple using the matching operator . #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecTolerations { - /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + /// Effect indicates the taint effect to match. Empty means match all taint effects. + /// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. #[serde(default, skip_serializing_if = "Option::is_none")] pub effect: Option, - /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + /// Key is the taint key that the toleration applies to. Empty means match all taint keys. + /// If the key is empty, operator must be Exists; this combination means to match all values and all keys. #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option, - /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + /// Operator represents a key's relationship to the value. + /// Valid operators are Exists and Equal. Defaults to Equal. + /// Exists is equivalent to wildcard for value, so that a pod can + /// tolerate all taints of a particular category. #[serde(default, skip_serializing_if = "Option::is_none")] pub operator: Option, - /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + /// TolerationSeconds represents the period of time the toleration (which must be + /// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + /// it is not set, which means tolerate the taint forever (do not evict). 
Zero and + /// negative values will be treated as 0 (evict immediately) by the system. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")] pub toleration_seconds: Option, - /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + /// Value is the taint value the toleration matches to. + /// If the operator is Exists, the value should be empty, otherwise just a regular string. #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, } @@ -3251,56 +4919,151 @@ pub struct WorkloadPodSetsTemplateSpecTolerations { /// TopologySpreadConstraint specifies how to spread matching pods among the given topology. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecTopologySpreadConstraints { - /// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + /// LabelSelector is used to find matching pods. + /// Pods that match this label selector are counted to determine the number of pods + /// in their corresponding topology domain. #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. - /// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + /// MatchLabelKeys is a set of pod label keys to select the pods over which + /// spreading will be calculated. The keys are used to lookup values from the + /// incoming pod labels, those key-value labels are ANDed with labelSelector + /// to select the group of existing pods over which spreading will be calculated + /// for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + /// MatchLabelKeys cannot be set when LabelSelector isn't set. + /// Keys that don't exist in the incoming pod labels will + /// be ignored. A null or empty list means only match against labelSelector. + /// + /// + /// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabelKeys")] pub match_label_keys: Option>, - /// MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. 
| zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed. + /// MaxSkew describes the degree to which pods may be unevenly distributed. + /// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + /// between the number of matching pods in the target topology and the global minimum. + /// The global minimum is the minimum number of matching pods in an eligible domain + /// or zero if the number of eligible domains is less than MinDomains. + /// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + /// labelSelector spread as 2/2/1: + /// In this case, the global minimum is 1. + /// | zone1 | zone2 | zone3 | + /// | P P | P P | P | + /// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + /// scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + /// violate MaxSkew(1). + /// - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + /// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + /// to topologies that satisfy it. + /// It's a required field. Default value is 1 and 0 is not allowed. #[serde(rename = "maxSkew")] pub max_skew: i32, - /// MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - /// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - /// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). + /// MinDomains indicates a minimum number of eligible domains. + /// When the number of eligible domains with matching topology keys is less than minDomains, + /// Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + /// And when the number of eligible domains with matching topology keys equals or greater than minDomains, + /// this value has no effect on scheduling. + /// As a result, when the number of eligible domains is less than minDomains, + /// scheduler won't schedule more than maxSkew Pods to those domains. + /// If value is nil, the constraint behaves as if MinDomains is equal to 1. 
+ /// Valid values are integers greater than 0. + /// When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + /// + /// + /// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + /// labelSelector spread as 2/2/2: + /// | zone1 | zone2 | zone3 | + /// | P P | P P | P P | + /// The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + /// In this situation, new pod with the same labelSelector cannot be scheduled, + /// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + /// it will violate MaxSkew. + /// + /// + /// This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). #[serde(default, skip_serializing_if = "Option::is_none", rename = "minDomains")] pub min_domains: Option, - /// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - /// If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + /// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + /// when calculating pod topology spread skew. Options are: + /// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + /// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + /// + /// + /// If this value is nil, the behavior is equivalent to the Honor policy. + /// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeAffinityPolicy")] pub node_affinity_policy: Option, - /// NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - /// If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + /// NodeTaintsPolicy indicates how we will treat node taints when calculating + /// pod topology spread skew. Options are: + /// - Honor: nodes without taints, along with tainted nodes for which the incoming pod + /// has a toleration, are included. + /// - Ignore: node taints are ignored. All nodes are included. + /// + /// + /// If this value is nil, the behavior is equivalent to the Ignore policy. + /// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeTaintsPolicy")] pub node_taints_policy: Option, - /// TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. 
Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + /// TopologyKey is the key of node labels. Nodes that have a label with this key + /// and identical values are considered to be in the same topology. + /// We consider each as a "bucket", and try to put balanced number + /// of pods into each bucket. + /// We define a domain as a particular instance of a topology. + /// Also, we define an eligible domain as a domain whose nodes meet the requirements of + /// nodeAffinityPolicy and nodeTaintsPolicy. + /// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + /// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + /// It's a required field. #[serde(rename = "topologyKey")] pub topology_key: String, - /// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field. + /// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + /// the spread constraint. + /// - DoNotSchedule (default) tells the scheduler not to schedule it. + /// - ScheduleAnyway tells the scheduler to schedule the pod in any location, + /// but giving higher precedence to topologies that would help reduce the + /// skew. + /// A constraint is considered "Unsatisfiable" for an incoming pod + /// if and only if every possible node assignment for that pod would violate + /// "MaxSkew" on some topology. + /// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + /// labelSelector spread as 3/1/1: + /// | zone1 | zone2 | zone3 | + /// | P P P | P | P | + /// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + /// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + /// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + /// won't make it *more* imbalanced. + /// It's a required field. #[serde(rename = "whenUnsatisfiable")] pub when_unsatisfiable: String, } -/// LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. +/// LabelSelector is used to find matching pods. +/// Pods that match this label selector are counted to determine the number of pods +/// in their corresponding topology domain. 
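As an illustrative aside: the maxSkew docs above walk through a 3-zone example (pods spread 2/2/1 with maxSkew=1). A small sketch reproducing that arithmetic, where the skew for a candidate domain is the matching pod count there after placement minus the global minimum across domains:

    /// Illustrative skew check for the 3-zone example in the maxSkew docs:
    /// placing a pod into `candidate` is allowed when the resulting skew
    /// (candidate count + 1 - global minimum) stays within max_skew.
    fn placement_within_skew(counts: &[i32], candidate: usize, max_skew: i32) -> bool {
        let global_min = counts.iter().copied().min().unwrap_or(0);
        let skew_after = counts[candidate] + 1 - global_min;
        skew_after <= max_skew
    }

    fn main() {
        // Zones spread as 2/2/1, maxSkew = 1: only zone3 (index 2) is allowed.
        let counts = [2, 2, 1];
        assert!(!placement_within_skew(&counts, 0, 1)); // zone1 would reach skew 3 - 1 = 2
        assert!(!placement_within_skew(&counts, 1, 1)); // zone2 likewise
        assert!(placement_within_skew(&counts, 2, 1));  // zone3 becomes 2/2/2
        // With maxSkew = 2 the incoming pod may go to any zone.
        assert!((0..counts.len()).all(|z| placement_within_skew(&counts, z, 2)));
    }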
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecTopologySpreadConstraintsLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecTopologySpreadConstraintsLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -3308,7 +5071,9 @@ pub struct WorkloadPodSetsTemplateSpecTopologySpreadConstraintsLabelSelectorMatc /// Volume represents a named volume in a pod that may be accessed by any container in the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumes { - /// awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// awsElasticBlockStore represents an AWS Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(default, skip_serializing_if = "Option::is_none", rename = "awsElasticBlockStore")] pub aws_elastic_block_store: Option, /// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
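As an illustrative aside: the label selector above ANDs matchLabels with matchExpressions, whose operators are In, NotIn, Exists and DoesNotExist. A compact evaluator for a single requirement over a plain label map (not the generated types; NotIn and DoesNotExist are treated as matching when the key is absent, as the upstream selector library does):

    use std::collections::BTreeMap;

    /// Illustrative: evaluates one matchExpressions requirement against pod labels.
    fn requirement_matches(
        labels: &BTreeMap<String, String>,
        key: &str,
        operator: &str,
        values: &[&str],
    ) -> bool {
        match operator {
            "In" => labels.get(key).map_or(false, |v| values.contains(&v.as_str())),
            "NotIn" => labels.get(key).map_or(true, |v| !values.contains(&v.as_str())),
            "Exists" => labels.contains_key(key),
            "DoesNotExist" => !labels.contains_key(key),
            _ => false,
        }
    }

    fn main() {
        let labels: BTreeMap<String, String> =
            [("app".to_string(), "worker".to_string())].into_iter().collect();
        assert!(requirement_matches(&labels, "app", "In", &["worker", "driver"]));
        assert!(requirement_matches(&labels, "tier", "DoesNotExist", &[]));
        assert!(!requirement_matches(&labels, "app", "NotIn", &["worker"]));
    }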
@@ -3320,7 +5085,8 @@ pub struct WorkloadPodSetsTemplateSpecVolumes { /// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime #[serde(default, skip_serializing_if = "Option::is_none")] pub cephfs: Option, - /// cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// cinder represents a cinder volume attached and mounted on kubelets host machine. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub cinder: Option, /// configMap represents a configMap that should populate this volume @@ -3332,46 +5098,91 @@ pub struct WorkloadPodSetsTemplateSpecVolumes { /// downwardAPI represents downward API about the pod that should populate this volume #[serde(default, skip_serializing_if = "Option::is_none", rename = "downwardAPI")] pub downward_api: Option, - /// emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// emptyDir represents a temporary directory that shares a pod's lifetime. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none", rename = "emptyDir")] pub empty_dir: Option, - /// ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. - /// Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). - /// Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. - /// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. - /// A pod can use both types of ephemeral volumes and persistent volumes at the same time. + /// ephemeral represents a volume that is handled by a cluster storage driver. + /// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + /// and deleted when the pod is removed. + /// + /// + /// Use this if: + /// a) the volume is only needed while the pod runs, + /// b) features of normal volumes like restoring from snapshot or capacity + /// tracking are needed, + /// c) the storage driver is specified through a storage class, and + /// d) the storage driver supports dynamic volume provisioning through + /// a PersistentVolumeClaim (see EphemeralVolumeSource for more + /// information on the connection between this volume type + /// and PersistentVolumeClaim). + /// + /// + /// Use PersistentVolumeClaim or one of the vendor-specific + /// APIs for volumes that persist for longer than the lifecycle + /// of an individual pod. + /// + /// + /// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + /// be used that way - see the documentation of the driver for + /// more information. 
+ /// + /// + /// A pod can use both types of ephemeral volumes and + /// persistent volumes at the same time. #[serde(default, skip_serializing_if = "Option::is_none")] pub ephemeral: Option, /// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. #[serde(default, skip_serializing_if = "Option::is_none")] pub fc: Option, - /// flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + /// flexVolume represents a generic volume resource that is + /// provisioned/attached using an exec based plugin. #[serde(default, skip_serializing_if = "Option::is_none", rename = "flexVolume")] pub flex_volume: Option, /// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running #[serde(default, skip_serializing_if = "Option::is_none")] pub flocker: Option, - /// gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// gcePersistentDisk represents a GCE Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none", rename = "gcePersistentDisk")] pub gce_persistent_disk: Option, - /// gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. + /// gitRepo represents a git repository at a particular revision. + /// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + /// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + /// into the Pod's container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "gitRepo")] pub git_repo: Option, - /// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md + /// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub glusterfs: Option, - /// hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write. + /// hostPath represents a pre-existing file or directory on the host + /// machine that is directly exposed to the container. This is generally + /// used for system agents or other privileged things that are allowed + /// to see the host machine. Most containers will NOT need this. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// --- + /// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + /// mount host directories as read/write. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "hostPath")] pub host_path: Option, - /// iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md + /// iscsi represents an ISCSI Disk resource that is attached to a + /// kubelet's host machine and then exposed to the pod. + /// More info: https://examples.k8s.io/volumes/iscsi/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub iscsi: Option, - /// name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// name of the volume. + /// Must be a DNS_LABEL and unique within the pod. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names pub name: String, - /// nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// nfs represents an NFS mount on the host that shares a pod's lifetime + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[serde(default, skip_serializing_if = "Option::is_none")] pub nfs: Option, - /// persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + /// persistentVolumeClaimVolumeSource represents a reference to a + /// PersistentVolumeClaim in the same namespace. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[serde(default, skip_serializing_if = "Option::is_none", rename = "persistentVolumeClaim")] pub persistent_volume_claim: Option, /// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine @@ -3386,13 +5197,15 @@ pub struct WorkloadPodSetsTemplateSpecVolumes { /// quobyte represents a Quobyte mount on the host that shares a pod's lifetime #[serde(default, skip_serializing_if = "Option::is_none")] pub quobyte: Option, - /// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md + /// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + /// More info: https://examples.k8s.io/volumes/rbd/README.md #[serde(default, skip_serializing_if = "Option::is_none")] pub rbd: Option, /// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "scaleIO")] pub scale_io: Option, - /// secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + /// secret represents a secret that should populate this volume. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[serde(default, skip_serializing_if = "Option::is_none")] pub secret: Option, /// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. @@ -3403,19 +5216,30 @@ pub struct WorkloadPodSetsTemplateSpecVolumes { pub vsphere_volume: Option, } -/// awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore +/// awsElasticBlockStore represents an AWS Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesAwsElasticBlockStore { - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + /// partition is the partition in the volume that you want to mount. + /// If omitted, the default is to mount by volume name. + /// Examples: For volume /dev/sda1, you specify the partition as "1". + /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). #[serde(default, skip_serializing_if = "Option::is_none")] pub partition: Option, - /// readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// readOnly value true will force the readOnly setting in VolumeMounts. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + /// volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore #[serde(rename = "volumeID")] pub volume_id: String, } @@ -3432,13 +5256,16 @@ pub struct WorkloadPodSetsTemplateSpecVolumesAzureDisk { /// diskURI is the URI of data disk in the blob storage #[serde(rename = "diskURI")] pub disk_uri: String, - /// fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is Filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
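As an illustrative aside: the awsElasticBlockStore docs above note that fsType is implicitly treated as "ext4" when unspecified. A short deserialization sketch using a hypothetical mirror struct (not the crate type), assuming serde_json is available:

    use serde::Deserialize;

    // Hypothetical mirror of the generated awsElasticBlockStore volume source;
    // fields follow the doc comments above.
    #[derive(Deserialize, Debug)]
    struct AwsEbsVolumeSketch {
        #[serde(default, rename = "fsType")]
        fs_type: Option<String>,
        #[serde(default)]
        partition: Option<i32>,
        #[serde(default, rename = "readOnly")]
        read_only: Option<bool>,
        #[serde(rename = "volumeID")]
        volume_id: String,
    }

    fn main() {
        let json = r#"{"volumeID": "vol-0abc123", "readOnly": true}"#;
        let ebs: AwsEbsVolumeSketch = serde_json::from_str(json).expect("valid EBS volume source");
        // Per the docs, fsType is implicitly treated as "ext4" when unspecified.
        let effective_fs = ebs.fs_type.as_deref().unwrap_or("ext4");
        assert_eq!(effective_fs, "ext4");
        assert_eq!(ebs.volume_id, "vol-0abc123");
        assert_eq!(ebs.partition, None);
    }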
#[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared #[serde(default, skip_serializing_if = "Option::is_none")] pub kind: Option, - /// readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } @@ -3446,7 +5273,8 @@ pub struct WorkloadPodSetsTemplateSpecVolumesAzureDisk { /// azureFile represents an Azure File Service mount on the host and bind mount to the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesAzureFile { - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// secretName is the name of secret that contains Azure Storage Account Name and Key @@ -3460,54 +5288,74 @@ pub struct WorkloadPodSetsTemplateSpecVolumesAzureFile { /// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCephfs { - /// monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// monitors is Required: Monitors is a collection of Ceph monitors + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it pub monitors: Vec, /// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / #[serde(default, skip_serializing_if = "Option::is_none")] pub path: Option, - /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretFile")] pub secret_file: Option, - /// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. 
+ /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + /// user is optional: User is the rados user name, default is admin + /// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, } -/// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it +/// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. +/// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCephfsSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md +/// cinder represents a cinder volume attached and mounted on kubelets host machine. +/// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCinder { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. + /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is optional: points to a secret object containing parameters used to connect to OpenStack. + /// secretRef is optional: points to a secret object containing parameters used to connect + /// to OpenStack. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + /// volumeID used to identify the volume in cinder. 
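// A brief, hypothetical sketch (not the generated types) of the recurring secretRef
// shape documented above: a local object reference that carries only an optional
// `name`, nested under the volume source. serde and serde_json are assumed
// dependencies, and the example values are illustrative only.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct SecretRefSketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    name: Option<String>,
}

#[derive(Serialize, Deserialize, Debug)]
struct CinderSketch {
    #[serde(rename = "volumeID")]
    volume_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")]
    secret_ref: Option<SecretRefSketch>,
}

fn main() {
    let vol = CinderSketch {
        volume_id: "example-volume-id".to_owned(),
        secret_ref: Some(SecretRefSketch { name: Some("cinder-secret".to_owned()) }),
    };
    // The reference serializes as a nested object: {"volumeID":...,"secretRef":{"name":...}}.
    println!("{}", serde_json::to_string(&vol).unwrap());
}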
+ /// More info: https://examples.k8s.io/mysql-cinder-pd/README.md #[serde(rename = "volumeID")] pub volume_id: String, } -/// secretRef is optional: points to a secret object containing parameters used to connect to OpenStack. +/// secretRef is optional: points to a secret object containing parameters used to connect +/// to OpenStack. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCinderSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3515,13 +5363,27 @@ pub struct WorkloadPodSetsTemplateSpecVolumesCinderSecretRef { /// configMap represents a configMap that should populate this volume #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesConfigMap { - /// defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode is optional: mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Defaults to 0644. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, - /// items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// ConfigMap will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the ConfigMap, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? 
+ /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional specify whether the ConfigMap or its keys must be defined @@ -3534,36 +5396,59 @@ pub struct WorkloadPodSetsTemplateSpecVolumesConfigMap { pub struct WorkloadPodSetsTemplateSpecVolumesConfigMapItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCsi { - /// driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + /// driver is the name of the CSI driver that handles this volume. + /// Consult with your admin for the correct name as registered in the cluster. pub driver: String, - /// fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + /// fsType to mount. Ex. "ext4", "xfs", "ntfs". + /// If not provided, the empty value is passed to the associated CSI driver + /// which will determine the default filesystem to apply. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + /// nodePublishSecretRef is a reference to the secret object containing + /// sensitive information to pass to the CSI driver to complete the CSI + /// NodePublishVolume and NodeUnpublishVolume calls. + /// This field is optional, and may be empty if no secret is required. 
If the + /// secret object contains more than one secret, all secret references are passed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "nodePublishSecretRef")] pub node_publish_secret_ref: Option, - /// readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + /// readOnly specifies a read-only configuration for the volume. + /// Defaults to false (read/write). #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + /// volumeAttributes stores driver-specific properties that are passed to the CSI + /// driver. Consult your driver's documentation for supported values. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeAttributes")] pub volume_attributes: Option>, } -/// nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. +/// nodePublishSecretRef is a reference to the secret object containing +/// sensitive information to pass to the CSI driver to complete the CSI +/// NodePublishVolume and NodeUnpublishVolume calls. +/// This field is optional, and may be empty if no secret is required. If the +/// secret object contains more than one secret, all secret references are passed. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesCsiNodePublishSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3571,7 +5456,14 @@ pub struct WorkloadPodSetsTemplateSpecVolumesCsiNodePublishSecretRef { /// downwardAPI represents downward API about the pod that should populate this volume #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesDownwardApi { - /// Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits to use on created files by default. Must be a + /// Optional: mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Defaults to 0644. + /// Directories within the path are not affected by this setting. 
+ /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, /// Items is a list of downward API volume file @@ -3585,12 +5477,18 @@ pub struct WorkloadPodSetsTemplateSpecVolumesDownwardApiItems { /// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits used to set permissions on this file, must be an octal value + /// between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, /// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' pub path: String, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, } @@ -3606,7 +5504,8 @@ pub struct WorkloadPodSetsTemplateSpecVolumesDownwardApiItemsFieldRef { pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesDownwardApiItemsResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -3619,46 +5518,125 @@ pub struct WorkloadPodSetsTemplateSpecVolumesDownwardApiItemsResourceFieldRef { pub resource: String, } -/// emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir +/// emptyDir represents a temporary directory that shares a pod's lifetime. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEmptyDir { - /// medium represents what type of storage medium should back this directory. 
The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// medium represents what type of storage medium should back this directory. + /// The default is "" which means to use the node's default medium. + /// Must be an empty string (default) or Memory. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none")] pub medium: Option, - /// sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + /// sizeLimit is the total amount of local storage required for this EmptyDir volume. + /// The size limit is also applicable for memory medium. + /// The maximum usage on memory medium EmptyDir would be the minimum value between + /// the SizeLimit specified here and the sum of memory limits of all containers in a pod. + /// The default is nil which means that the limit is undefined. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir #[serde(default, skip_serializing_if = "Option::is_none", rename = "sizeLimit")] pub size_limit: Option, } -/// ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. -/// Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). -/// Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. -/// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. -/// A pod can use both types of ephemeral volumes and persistent volumes at the same time. +/// ephemeral represents a volume that is handled by a cluster storage driver. +/// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, +/// and deleted when the pod is removed. +/// +/// +/// Use this if: +/// a) the volume is only needed while the pod runs, +/// b) features of normal volumes like restoring from snapshot or capacity +/// tracking are needed, +/// c) the storage driver is specified through a storage class, and +/// d) the storage driver supports dynamic volume provisioning through +/// a PersistentVolumeClaim (see EphemeralVolumeSource for more +/// information on the connection between this volume type +/// and PersistentVolumeClaim). +/// +/// +/// Use PersistentVolumeClaim or one of the vendor-specific +/// APIs for volumes that persist for longer than the lifecycle +/// of an individual pod. 
+/// +/// +/// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to +/// be used that way - see the documentation of the driver for +/// more information. +/// +/// +/// A pod can use both types of ephemeral volumes and +/// persistent volumes at the same time. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeral { - /// Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - /// An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - /// This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - /// Required, must not be nil. + /// Will be used to create a stand-alone PVC to provision the volume. + /// The pod in which this EphemeralVolumeSource is embedded will be the + /// owner of the PVC, i.e. the PVC will be deleted together with the + /// pod. The name of the PVC will be `-` where + /// `` is the name from the `PodSpec.Volumes` array + /// entry. Pod validation will reject the pod if the concatenated name + /// is not valid for a PVC (for example, too long). + /// + /// + /// An existing PVC with that name that is not owned by the pod + /// will *not* be used for the pod to avoid using an unrelated + /// volume by mistake. Starting the pod is then blocked until + /// the unrelated PVC is removed. If such a pre-created PVC is + /// meant to be used by the pod, the PVC has to updated with an + /// owner reference to the pod once the pod exists. Normally + /// this should not be necessary, but it may be useful when + /// manually reconstructing a broken cluster. + /// + /// + /// This field is read-only and no changes will be made by Kubernetes + /// to the PVC after it has been created. + /// + /// + /// Required, must not be nil. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeClaimTemplate")] pub volume_claim_template: Option, } -/// Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). -/// An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. 
-/// This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. -/// Required, must not be nil. +/// Will be used to create a stand-alone PVC to provision the volume. +/// The pod in which this EphemeralVolumeSource is embedded will be the +/// owner of the PVC, i.e. the PVC will be deleted together with the +/// pod. The name of the PVC will be `-` where +/// `` is the name from the `PodSpec.Volumes` array +/// entry. Pod validation will reject the pod if the concatenated name +/// is not valid for a PVC (for example, too long). +/// +/// +/// An existing PVC with that name that is not owned by the pod +/// will *not* be used for the pod to avoid using an unrelated +/// volume by mistake. Starting the pod is then blocked until +/// the unrelated PVC is removed. If such a pre-created PVC is +/// meant to be used by the pod, the PVC has to updated with an +/// owner reference to the pod once the pod exists. Normally +/// this should not be necessary, but it may be useful when +/// manually reconstructing a broken cluster. +/// +/// +/// This field is read-only and no changes will be made by Kubernetes +/// to the PVC after it has been created. +/// +/// +/// Required, must not be nil. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplate { - /// May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + /// May contain labels and annotations that will be copied into the PVC + /// when creating it. No other fields are allowed and will be rejected during + /// validation. #[serde(default, skip_serializing_if = "Option::is_none")] pub metadata: Option, - /// The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + /// The specification for the PersistentVolumeClaim. The entire content is + /// copied unchanged into the PVC that gets created from this + /// template. The same fields as in a PersistentVolumeClaim + /// are also valid here. pub spec: WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpec, } -/// May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. +/// May contain labels and annotations that will be copied into the PVC +/// when creating it. No other fields are allowed and will be rejected during +/// validation. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateMetadata { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -3673,31 +5651,81 @@ pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateMetadat pub namespace: Option, } -/// The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. +/// The specification for the PersistentVolumeClaim. The entire content is +/// copied unchanged into the PVC that gets created from this +/// template. The same fields as in a PersistentVolumeClaim +/// are also valid here. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpec { - /// accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + /// accessModes contains the desired access modes the volume should have. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessModes")] pub access_modes: Option>, - /// dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. + /// dataSource field can be used to specify either: + /// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + /// * An existing PVC (PersistentVolumeClaim) + /// If the provisioner or an external controller can support the specified data source, + /// it will create a new volume based on the contents of the specified data source. + /// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + /// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + /// If the namespace is specified, then dataSourceRef will not be copied to dataSource. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSource")] pub data_source: Option, - /// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ /// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + /// volume is desired. This may be any object from a non-empty API group (non + /// core object) or a PersistentVolumeClaim object. + /// When this field is specified, volume binding will only succeed if the type of + /// the specified object matches some installed volume populator or dynamic + /// provisioner. + /// This field will replace the functionality of the dataSource field and as such + /// if both fields are non-empty, they must have the same value. For backwards + /// compatibility, when namespace isn't specified in dataSourceRef, + /// both fields (dataSource and dataSourceRef) will be set to the same + /// value automatically if one of them is empty and the other is non-empty. + /// When namespace is specified in dataSourceRef, + /// dataSource isn't set to the same value and must be empty. + /// There are three important differences between dataSource and dataSourceRef: + /// * While dataSource only allows two specific types of objects, dataSourceRef + /// allows any non-core object, as well as PersistentVolumeClaim objects. + /// * While dataSource ignores disallowed values (dropping them), dataSourceRef + /// preserves all values, and generates an error if a disallowed value is + /// specified. + /// * While dataSource only allows local objects, dataSourceRef allows objects + /// in any namespaces. + /// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + /// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSourceRef")] pub data_source_ref: Option, - /// resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + /// resources represents the minimum resources the volume should have. + /// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + /// that are lower than previous value but must still be higher than capacity recorded in the + /// status field of the claim. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources #[serde(default, skip_serializing_if = "Option::is_none")] pub resources: Option, /// selector is a label query over volumes to consider for binding. #[serde(default, skip_serializing_if = "Option::is_none")] pub selector: Option, - /// storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + /// storageClassName is the name of the StorageClass required by the claim. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassName")] pub storage_class_name: Option, - /// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + /// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + /// If specified, the CSI driver will create or update the volume with the attributes defined + /// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + /// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + /// will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + /// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + /// will be set by the persistentvolume controller if it exists. + /// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + /// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + /// exists. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + /// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeAttributesClassName")] pub volume_attributes_class_name: Option, - /// volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + /// volumeMode defines what type of volume is required by the claim. + /// Value of Filesystem is implied when not included in claim spec. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeMode")] pub volume_mode: Option, /// volumeName is the binding reference to the PersistentVolume backing this claim. @@ -3705,10 +5733,19 @@ pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpec { pub volume_name: Option, } -/// dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource. 
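// A hedged, self-contained sketch of the claim-template spec fields documented above
// (accessModes, resources.requests, storageClassName), parsed from YAML into
// hypothetical mirror structs. Quantities are kept as plain strings for simplicity;
// serde and serde_yaml are assumed dependencies.
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;

#[derive(Serialize, Deserialize, Debug)]
struct ClaimSpecSketch {
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessModes")]
    access_modes: Option<Vec<String>>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    resources: Option<ResourcesSketch>,
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassName")]
    storage_class_name: Option<String>,
}

#[derive(Serialize, Deserialize, Debug)]
struct ResourcesSketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    requests: Option<BTreeMap<String, String>>,
}

fn main() {
    let yaml = "accessModes: [\"ReadWriteOnce\"]\nresources:\n  requests:\n    storage: 1Gi\nstorageClassName: standard\n";
    let spec: ClaimSpecSketch = serde_yaml::from_str(yaml).unwrap();
    assert_eq!(spec.storage_class_name.as_deref(), Some("standard"));
    println!("{:#?}", spec);
}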
+/// dataSource field can be used to specify either: +/// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) +/// * An existing PVC (PersistentVolumeClaim) +/// If the provisioner or an external controller can support the specified data source, +/// it will create a new volume based on the contents of the specified data source. +/// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, +/// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. +/// If the namespace is specified, then dataSourceRef will not be copied to dataSource. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecDataSource { - /// APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + /// APIGroup is the group for the resource being referenced. + /// If APIGroup is not specified, the specified Kind must be in the core API group. + /// For any other third-party types, APIGroup is required. #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGroup")] pub api_group: Option, /// Kind is the type of resource being referenced @@ -3717,28 +5754,62 @@ pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecDat pub name: String, } -/// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. +/// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty +/// volume is desired. This may be any object from a non-empty API group (non +/// core object) or a PersistentVolumeClaim object. +/// When this field is specified, volume binding will only succeed if the type of +/// the specified object matches some installed volume populator or dynamic +/// provisioner. 
+/// This field will replace the functionality of the dataSource field and as such +/// if both fields are non-empty, they must have the same value. For backwards +/// compatibility, when namespace isn't specified in dataSourceRef, +/// both fields (dataSource and dataSourceRef) will be set to the same +/// value automatically if one of them is empty and the other is non-empty. +/// When namespace is specified in dataSourceRef, +/// dataSource isn't set to the same value and must be empty. +/// There are three important differences between dataSource and dataSourceRef: +/// * While dataSource only allows two specific types of objects, dataSourceRef +/// allows any non-core object, as well as PersistentVolumeClaim objects. +/// * While dataSource ignores disallowed values (dropping them), dataSourceRef +/// preserves all values, and generates an error if a disallowed value is +/// specified. +/// * While dataSource only allows local objects, dataSourceRef allows objects +/// in any namespaces. +/// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. +/// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecDataSourceRef { - /// APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + /// APIGroup is the group for the resource being referenced. + /// If APIGroup is not specified, the specified Kind must be in the core API group. + /// For any other third-party types, APIGroup is required. #[serde(default, skip_serializing_if = "Option::is_none", rename = "apiGroup")] pub api_group: Option, /// Kind is the type of resource being referenced pub kind: String, /// Name is the name of resource being referenced pub name: String, - /// Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + /// Namespace is the namespace of resource being referenced + /// Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + /// (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. #[serde(default, skip_serializing_if = "Option::is_none")] pub namespace: Option, } -/// resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources +/// resources represents the minimum resources the volume should have. 
+/// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements +/// that are lower than previous value but must still be higher than capacity recorded in the +/// status field of the claim. +/// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecResources { - /// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Limits describes the maximum amount of compute resources allowed. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub limits: Option>, - /// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + /// Requests describes the minimum amount of compute resources required. + /// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + /// otherwise to an implementation-defined value. Requests cannot exceed Limits. + /// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ #[serde(default, skip_serializing_if = "Option::is_none")] pub requests: Option>, } @@ -3749,19 +5820,26 @@ pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecSel /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -3769,46 +5847,65 @@ pub struct WorkloadPodSetsTemplateSpecVolumesEphemeralVolumeClaimTemplateSpecSel /// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesFc { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// lun is Optional: FC target lun number #[serde(default, skip_serializing_if = "Option::is_none")] pub lun: Option, - /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// targetWWNs is Optional: FC target worldwide names (WWNs) #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetWWNs")] pub target_ww_ns: Option>, - /// wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + /// wwids Optional: FC volume world wide identifiers (wwids) + /// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. #[serde(default, skip_serializing_if = "Option::is_none")] pub wwids: Option>, } -/// flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. +/// flexVolume represents a generic volume resource that is +/// provisioned/attached using an exec based plugin. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesFlexVolume { /// driver is the name of the driver to use for this volume. pub driver: String, - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// options is Optional: this field holds extra command options if any. 
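// The fc comment above says either wwids or the targetWWNs+lun pair must be set, but
// not both simultaneously. Below is a small, hypothetical validation helper for that
// rule; plain Rust with no external crates, and the struct is a stand-in rather than
// the generated type.
struct FcVolumeSketch {
    target_wwns: Option<Vec<String>>,
    lun: Option<i32>,
    wwids: Option<Vec<String>>,
}

fn fc_addressing_is_valid(fc: &FcVolumeSketch) -> bool {
    let by_wwn = fc.target_wwns.is_some() && fc.lun.is_some();
    let by_wwid = fc.wwids.is_some();
    // Exactly one of the two addressing modes may be used.
    by_wwn != by_wwid
}

fn main() {
    let by_wwn = FcVolumeSketch {
        target_wwns: Some(vec!["50060e801049cfd1".to_owned()]),
        lun: Some(0),
        wwids: None,
    };
    let both = FcVolumeSketch {
        target_wwns: Some(vec!["50060e801049cfd1".to_owned()]),
        lun: Some(0),
        wwids: Some(vec!["3600508b400105e210000900000490000".to_owned()]),
    };
    assert!(fc_addressing_is_valid(&by_wwn));
    assert!(!fc_addressing_is_valid(&both));
}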
#[serde(default, skip_serializing_if = "Option::is_none")] pub options: Option>, - /// readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly is Optional: defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. + /// secretRef is Optional: secretRef is reference to the secret object containing + /// sensitive information to pass to the plugin scripts. This may be + /// empty if no secret object is specified. If the secret object + /// contains more than one secret, all secrets are passed to the plugin + /// scripts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, } -/// secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. +/// secretRef is Optional: secretRef is reference to the secret object containing +/// sensitive information to pass to the plugin scripts. This may be +/// empty if no secret object is specified. If the secret object +/// contains more than one secret, all secrets are passed to the plugin +/// scripts. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesFlexVolumeSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -3816,7 +5913,8 @@ pub struct WorkloadPodSetsTemplateSpecVolumesFlexVolumeSecretRef { /// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesFlocker { - /// datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + /// datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + /// should be considered as deprecated #[serde(default, skip_serializing_if = "Option::is_none", rename = "datasetName")] pub dataset_name: Option, /// datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset @@ -3824,27 +5922,46 @@ pub struct WorkloadPodSetsTemplateSpecVolumesFlocker { pub dataset_uuid: Option, } -/// gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk +/// gcePersistentDisk represents a GCE Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. 
+/// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesGcePersistentDisk { - /// fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// partition is the partition in the volume that you want to mount. + /// If omitted, the default is to mount by volume name. + /// Examples: For volume /dev/sda1, you specify the partition as "1". + /// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none")] pub partition: Option, - /// pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(rename = "pdName")] pub pd_name: String, - /// readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } -/// gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. +/// gitRepo represents a git repository at a particular revision. +/// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +/// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +/// into the Pod's container. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesGitRepo { - /// directory is the target directory name. Must not contain or start with '..'. If '.' 
is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + /// directory is the target directory name. + /// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + /// git repository. Otherwise, if specified, the volume will contain the git repository in + /// the subdirectory with the given name. #[serde(default, skip_serializing_if = "Option::is_none")] pub directory: Option, /// repository is the URL @@ -3854,29 +5971,47 @@ pub struct WorkloadPodSetsTemplateSpecVolumesGitRepo { pub revision: Option, } -/// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md +/// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. +/// More info: https://examples.k8s.io/volumes/glusterfs/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesGlusterfs { - /// endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// endpoints is the endpoint name that details Glusterfs topology. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod pub endpoints: String, - /// path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// path is the Glusterfs volume path. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod pub path: String, - /// readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + /// readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + /// Defaults to false. + /// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } -/// hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write. +/// hostPath represents a pre-existing file or directory on the host +/// machine that is directly exposed to the container. This is generally +/// used for system agents or other privileged things that are allowed +/// to see the host machine. Most containers will NOT need this. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath +/// --- +/// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not +/// mount host directories as read/write. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesHostPath { - /// path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// path of the directory on the host. + /// If the path is a symlink, it will follow the link to the real path. 
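The gitRepo deprecation note a few lines up describes a replacement pattern rather than a field: clone in an init container and share the checkout through an emptyDir. A rough sketch of that manifest shape, with placeholder names, image, and repository URL, built with serde_json purely to show the structure:

fn main() {
    let pod_spec = serde_json::json!({
        "volumes": [
            { "name": "repo", "emptyDir": {} }
        ],
        "initContainers": [{
            "name": "clone",
            "image": "alpine/git",  // placeholder image that ships a git binary
            "args": ["clone", "--depth", "1", "https://example.com/repo.git", "/repo"],
            "volumeMounts": [{ "name": "repo", "mountPath": "/repo" }]
        }],
        "containers": [{
            "name": "app",
            "image": "busybox",
            "command": ["sh", "-c", "ls /repo && sleep 3600"],
            "volumeMounts": [{ "name": "repo", "mountPath": "/repo" }]
        }]
    });
    println!("{}", serde_json::to_string_pretty(&pod_spec).unwrap());
}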
+ /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath pub path: String, - /// type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + /// type for HostPath Volume + /// Defaults to "" + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")] pub r#type: Option, } -/// iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md +/// iscsi represents an ISCSI Disk resource that is attached to a +/// kubelet's host machine and then exposed to the pod. +/// More info: https://examples.k8s.io/volumes/iscsi/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesIscsi { /// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication @@ -3885,29 +6020,39 @@ pub struct WorkloadPodSetsTemplateSpecVolumesIscsi { /// chapAuthSession defines whether support iSCSI Session CHAP authentication #[serde(default, skip_serializing_if = "Option::is_none", rename = "chapAuthSession")] pub chap_auth_session: Option, - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + /// initiatorName is the custom iSCSI Initiator Name. + /// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + /// : will be created for the connection. #[serde(default, skip_serializing_if = "Option::is_none", rename = "initiatorName")] pub initiator_name: Option, /// iqn is the target iSCSI Qualified Name. pub iqn: String, - /// iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + /// iscsiInterface is the interface Name that uses an iSCSI transport. + /// Defaults to 'default' (tcp). #[serde(default, skip_serializing_if = "Option::is_none", rename = "iscsiInterface")] pub iscsi_interface: Option, /// lun represents iSCSI Target Lun number. pub lun: i32, - /// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + /// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + /// is other than default (typically TCP ports 860 and 3260). 
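Since `type` is a Rust keyword, the generated hostPath field above has to use the raw identifier `r#type` and lean on `rename = "type"` to keep the manifest key unchanged. A small self-contained sketch of the same pattern with a local stand-in struct (assumes serde with derive plus serde_json; the path and type value are just examples):

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct HostPathSketch {
    path: String,
    // `type` cannot be written as a field name directly; r#type plus the serde
    // rename still produces and accepts the plain "type" key in manifests.
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "type")]
    r#type: Option<String>,
}

fn main() {
    let v = HostPathSketch {
        path: "/var/log".to_string(),
        r#type: Some("DirectoryOrCreate".to_string()),
    };
    // Prints: {"path":"/var/log","type":"DirectoryOrCreate"}
    println!("{}", serde_json::to_string(&v).unwrap());
}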
#[serde(default, skip_serializing_if = "Option::is_none")] pub portals: Option>, - /// readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// secretRef is the CHAP Secret for iSCSI target and initiator authentication #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + /// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + /// is other than default (typically TCP ports 860 and 3260). #[serde(rename = "targetPortal")] pub target_portal: String, } @@ -3915,30 +6060,41 @@ pub struct WorkloadPodSetsTemplateSpecVolumesIscsi { /// secretRef is the CHAP Secret for iSCSI target and initiator authentication #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesIscsiSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs +/// nfs represents an NFS mount on the host that shares a pod's lifetime +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesNfs { - /// path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// path that is exported by the NFS server. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs pub path: String, - /// readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// readOnly here will force the NFS export to be mounted with read-only permissions. + /// Defaults to false. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + /// server is the hostname or IP address of the NFS server. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs pub server: String, } -/// persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims +/// persistentVolumeClaimVolumeSource represents a reference to a +/// PersistentVolumeClaim in the same namespace. 
+/// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesPersistentVolumeClaim { - /// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + /// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + /// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims #[serde(rename = "claimName")] pub claim_name: String, - /// readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + /// readOnly Will force the ReadOnly setting in VolumeMounts. + /// Default false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, } @@ -3946,7 +6102,9 @@ pub struct WorkloadPodSetsTemplateSpecVolumesPersistentVolumeClaim { /// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesPhotonPersistentDisk { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// pdID is the ID that identifies Photon Controller persistent disk @@ -3957,10 +6115,13 @@ pub struct WorkloadPodSetsTemplateSpecVolumesPhotonPersistentDisk { /// portworxVolume represents a portworx volume attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesPortworxVolume { - /// fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + /// fSType represents the filesystem type to mount + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, /// volumeID uniquely identifies a Portworx volume @@ -3971,7 +6132,12 @@ pub struct WorkloadPodSetsTemplateSpecVolumesPortworxVolume { /// projected items for all in one resources secrets, configmaps, and downward API #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjected { - /// defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
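For volume sources as small as the nfs and persistentVolumeClaim entries above, the serialized shape is easiest to see directly. A sketch of how two such entries would sit in a pod template's volumes list (names, server, and claim are placeholders; serde_json only for illustration):

fn main() {
    let volumes = serde_json::json!([
        {
            "name": "shared",
            "nfs": { "server": "nfs.example.com", "path": "/exports/data", "readOnly": true }
        },
        {
            "name": "data",
            "persistentVolumeClaim": { "claimName": "my-claim" }  // readOnly omitted, defaults to false
        }
    ]);
    println!("{}", serde_json::to_string_pretty(&volumes).unwrap());
}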
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode are the mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, /// sources is the list of volume projections @@ -3982,10 +6148,22 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjected { /// Projection that may be projected along with other supported volume types #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSources { - /// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. - /// Alpha, gated by the ClusterTrustBundleProjection feature gate. - /// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. - /// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. + /// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + /// of ClusterTrustBundle objects in an auto-updating file. + /// + /// + /// Alpha, gated by the ClusterTrustBundleProjection feature gate. + /// + /// + /// ClusterTrustBundle objects can either be selected by name, or by the + /// combination of signer name and a label selector. + /// + /// + /// Kubelet performs aggressive normalization of the PEM contents written + /// into the pod filesystem. Esoteric PEM features such as inter-block + /// comments and block headers are stripped. Certificates are deduplicated. + /// The ordering of certificates within the file is arbitrary, and Kubelet + /// may change the order over time. #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterTrustBundle")] pub cluster_trust_bundle: Option, /// configMap information about the configMap data to project @@ -4002,47 +6180,79 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSources { pub service_account_token: Option, } -/// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. -/// Alpha, gated by the ClusterTrustBundleProjection feature gate. -/// ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. -/// Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time. 
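The defaultMode wording above (octal 0000 to 0777, or decimal 0 to 511, with JSON accepting only the decimal form) is easy to get wrong; a quick numeric check plus the JSON shape, again with serde_json just for illustration:

fn main() {
    // 0o644 (rw-r--r--) and 0o777 (rwxrwxrwx) expressed in decimal, as JSON requires.
    assert_eq!(0o644, 420);
    assert_eq!(0o777, 511);

    let projected = serde_json::json!({
        "projected": {
            "defaultMode": 420,  // i.e. 0644; JSON manifests must use the decimal value
            "sources": []
        }
    });
    println!("{}", serde_json::to_string_pretty(&projected).unwrap());
}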
+/// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field +/// of ClusterTrustBundle objects in an auto-updating file. +/// +/// +/// Alpha, gated by the ClusterTrustBundleProjection feature gate. +/// +/// +/// ClusterTrustBundle objects can either be selected by name, or by the +/// combination of signer name and a label selector. +/// +/// +/// Kubelet performs aggressive normalization of the PEM contents written +/// into the pod filesystem. Esoteric PEM features such as inter-block +/// comments and block headers are stripped. Certificates are deduplicated. +/// The ordering of certificates within the file is arbitrary, and Kubelet +/// may change the order over time. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesClusterTrustBundle { - /// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". + /// Select all ClusterTrustBundles that match this label selector. Only has + /// effect if signerName is set. Mutually-exclusive with name. If unset, + /// interpreted as "match nothing". If set but empty, interpreted as "match + /// everything". #[serde(default, skip_serializing_if = "Option::is_none", rename = "labelSelector")] pub label_selector: Option, - /// Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector. + /// Select a single ClusterTrustBundle by object name. Mutually-exclusive + /// with signerName and labelSelector. #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles. + /// If true, don't block pod startup if the referenced ClusterTrustBundle(s) + /// aren't available. If using name, then the named ClusterTrustBundle is + /// allowed not to exist. If using signerName, then the combination of + /// signerName and labelSelector is allowed to match zero + /// ClusterTrustBundles. #[serde(default, skip_serializing_if = "Option::is_none")] pub optional: Option, /// Relative path from the volume root to write the bundle. pub path: String, - /// Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated. + /// Select all ClusterTrustBundles that match this signer name. + /// Mutually-exclusive with name. The contents of all selected + /// ClusterTrustBundles will be unified and deduplicated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "signerName")] pub signer_name: Option, } -/// Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as "match nothing". If set but empty, interpreted as "match everything". +/// Select all ClusterTrustBundles that match this label selector. Only has +/// effect if signerName is set. Mutually-exclusive with name. If unset, +/// interpreted as "match nothing". If set but empty, interpreted as "match +/// everything". 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesClusterTrustBundleLabelSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesClusterTrustBundleLabelSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -4050,10 +6260,18 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesClusterTrustBundleL /// configMap information about the configMap data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesConfigMap { - /// items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// ConfigMap will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. 
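The selector semantics spelled out above (matchLabels and matchExpressions are ANDed, and In needs a non-empty values list) serialize to the usual Kubernetes label-selector shape. A sketch with placeholder keys and values:

fn main() {
    // Reads as: the signer label must equal "platform" AND the tier label must
    // be one of "trusted" or "internal".
    let selector = serde_json::json!({
        "matchLabels": { "bundle.example.com/signer": "platform" },
        "matchExpressions": [
            { "key": "bundle.example.com/tier", "operator": "In", "values": ["trusted", "internal"] }
        ]
    });
    println!("{}", serde_json::to_string_pretty(&selector).unwrap());
}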
If a key is specified which is not present in the ConfigMap, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional specify whether the ConfigMap or its keys must be defined @@ -4066,10 +6284,18 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesConfigMap { pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesConfigMapItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } @@ -4087,12 +6313,18 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesDownwardApiItems { /// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fieldRef")] pub field_ref: Option, - /// Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// Optional: mode bits used to set permissions on this file, must be an octal value + /// between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. 
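A sketch of the configMap projection described above: only the listed key is projected, it lands at the given relative path, and the per-file mode overrides defaultMode (420 decimal is 0644). The ConfigMap name and key are placeholders:

fn main() {
    let source = serde_json::json!({
        "configMap": {
            "name": "app-config",
            "optional": false,
            "items": [
                { "key": "settings.yaml", "path": "config/settings.yaml", "mode": 420 }
            ]
        }
    });
    println!("{}", serde_json::to_string_pretty(&source).unwrap());
}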
#[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, /// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' pub path: String, - /// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + /// Selects a resource of the container: only resources limits and requests + /// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceFieldRef")] pub resource_field_ref: Option, } @@ -4108,7 +6340,8 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesDownwardApiItemsFie pub field_path: String, } -/// Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. +/// Selects a resource of the container: only resources limits and requests +/// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesDownwardApiItemsResourceFieldRef { /// Container name: required for volumes, optional for env vars @@ -4124,10 +6357,18 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesDownwardApiItemsRes /// secret information about the secret data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesSecret { - /// items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items if unspecified, each key-value pair in the Data field of the referenced + /// Secret will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the Secret, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, /// optional field specify whether the Secret or its key must be defined @@ -4140,78 +6381,124 @@ pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesSecret { pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesSecretItems { /// key is the key to project. 
pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// serviceAccountToken is information about the serviceAccountToken data to project #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesProjectedSourcesServiceAccountToken { - /// audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + /// audience is the intended audience of the token. A recipient of a token + /// must identify itself with an identifier specified in the audience of the + /// token, and otherwise should reject the token. The audience defaults to the + /// identifier of the apiserver. #[serde(default, skip_serializing_if = "Option::is_none")] pub audience: Option, - /// expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + /// expirationSeconds is the requested duration of validity of the service + /// account token. As the token approaches expiration, the kubelet volume + /// plugin will proactively rotate the service account token. The kubelet will + /// start trying to rotate the token if the token is older than 80 percent of + /// its time to live or if the token is older than 24 hours.Defaults to 1 hour + /// and must be at least 10 minutes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "expirationSeconds")] pub expiration_seconds: Option, - /// path is the path relative to the mount point of the file to project the token into. + /// path is the path relative to the mount point of the file to project the + /// token into. 
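The expirationSeconds rotation rule above (rotate once the token is older than 80 percent of its time to live, or older than 24 hours, whichever comes first) is easy to sanity-check with plain arithmetic. A small sketch under that reading of the rule, no Kubernetes APIs involved:

// Age in seconds after which the kubelet would start trying to rotate a
// projected service-account token, per the rule quoted above.
fn rotation_threshold_secs(expiration_seconds: u64) -> u64 {
    let eighty_percent = expiration_seconds * 4 / 5;
    eighty_percent.min(24 * 60 * 60)
}

fn main() {
    // Default 1 hour token: rotation attempts begin after 48 minutes.
    assert_eq!(rotation_threshold_secs(3600), 2880);
    // Minimum allowed 10 minute token: rotation attempts begin after 8 minutes.
    assert_eq!(rotation_threshold_secs(600), 480);
    // A 7 day token hits the 24 hour cap before the 80 percent mark.
    assert_eq!(rotation_threshold_secs(7 * 24 * 3600), 24 * 3600);
    println!("ok");
}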
pub path: String, } /// quobyte represents a Quobyte mount on the host that shares a pod's lifetime #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesQuobyte { - /// group to map volume access to Default is no group + /// group to map volume access to + /// Default is no group #[serde(default, skip_serializing_if = "Option::is_none")] pub group: Option, - /// readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + /// readOnly here will force the Quobyte volume to be mounted with read-only permissions. + /// Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + /// registry represents a single or multiple Quobyte Registry services + /// specified as a string as host:port pair (multiple entries are separated with commas) + /// which acts as the central registry for volumes pub registry: String, - /// tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + /// tenant owning the given Quobyte volume in the Backend + /// Used with dynamically provisioned Quobyte volumes, value is set by the plugin #[serde(default, skip_serializing_if = "Option::is_none")] pub tenant: Option, - /// user to map volume access to Defaults to serivceaccount user + /// user to map volume access to + /// Defaults to serivceaccount user #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, /// volume is a string that references an already created Quobyte volume by name. pub volume: String, } -/// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md +/// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. +/// More info: https://examples.k8s.io/volumes/rbd/README.md #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesRbd { - /// fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine + /// fsType is the filesystem type of the volume that you want to mount. + /// Tip: Ensure that the filesystem type is supported by the host operating system. + /// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + /// TODO: how do we prevent errors in the filesystem from compromising the machine #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// image is the rados image name. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it pub image: String, - /// keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// keyring is the path to key ring for RBDUser. + /// Default is /etc/ceph/keyring. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub keyring: Option, - /// monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// monitors is a collection of Ceph monitors. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it pub monitors: Vec, - /// pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// pool is the rados pool name. + /// Default is rbd. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub pool: Option, - /// readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// readOnly here will force the ReadOnly setting in VolumeMounts. + /// Defaults to false. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// secretRef is name of the authentication secret for RBDUser. If provided + /// overrides keyring. + /// Default is nil. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + /// user is the rados user name. + /// Default is admin. + /// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[serde(default, skip_serializing_if = "Option::is_none")] pub user: Option, } -/// secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it +/// secretRef is name of the authentication secret for RBDUser. If provided +/// overrides keyring. +/// Default is nil. +/// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesRbdSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -4219,7 +6506,10 @@ pub struct WorkloadPodSetsTemplateSpecVolumesRbdSecretRef { /// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesScaleIo { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Default is "xfs". + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". + /// Default is "xfs". #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// gateway is the host address of the ScaleIO API Gateway. @@ -4227,16 +6517,19 @@ pub struct WorkloadPodSetsTemplateSpecVolumesScaleIo { /// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. #[serde(default, skip_serializing_if = "Option::is_none", rename = "protectionDomain")] pub protection_domain: Option, - /// readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly Defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + /// secretRef references to the secret for ScaleIO user and other + /// sensitive information. If this is not provided, Login operation will fail. #[serde(rename = "secretRef")] pub secret_ref: WorkloadPodSetsTemplateSpecVolumesScaleIoSecretRef, /// sslEnabled Flag enable/disable SSL communication with Gateway, default false #[serde(default, skip_serializing_if = "Option::is_none", rename = "sslEnabled")] pub ssl_enabled: Option, - /// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + /// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + /// Default is ThinProvisioned. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageMode")] pub storage_mode: Option, /// storagePool is the ScaleIO Storage Pool associated with the protection domain. @@ -4244,32 +6537,50 @@ pub struct WorkloadPodSetsTemplateSpecVolumesScaleIo { pub storage_pool: Option, /// system is the name of the storage system as configured in ScaleIO. pub system: String, - /// volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + /// volumeName is the name of a volume already created in the ScaleIO system + /// that is associated with this volume source. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeName")] pub volume_name: Option, } -/// secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. +/// secretRef references to the secret for ScaleIO user and other +/// sensitive information. If this is not provided, Login operation will fail. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesScaleIoSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// secret represents a secret that should populate this volume. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret +/// secret represents a secret that should populate this volume. +/// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesSecret { - /// defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// defaultMode is Optional: mode bits used to set permissions on created files by default. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values + /// for mode bits. Defaults to 0644. + /// Directories within the path are not affected by this setting. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultMode")] pub default_mode: Option, - /// items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + /// items If unspecified, each key-value pair in the Data field of the referenced + /// Secret will be projected into the volume as a file whose name is the + /// key and content is the value. If specified, the listed keys will be + /// projected into the specified paths, and unlisted keys will not be + /// present. If a key is specified which is not present in the Secret, + /// the volume setup will error unless it is marked optional. Paths must be + /// relative and may not contain the '..' path or start with '..'. #[serde(default, skip_serializing_if = "Option::is_none")] pub items: Option>, /// optional field specify whether the Secret or its keys must be defined #[serde(default, skip_serializing_if = "Option::is_none")] pub optional: Option, - /// secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + /// secretName is the name of the secret in the pod's namespace to use. + /// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretName")] pub secret_name: Option, } @@ -4279,37 +6590,58 @@ pub struct WorkloadPodSetsTemplateSpecVolumesSecret { pub struct WorkloadPodSetsTemplateSpecVolumesSecretItems { /// key is the key to project. pub key: String, - /// mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + /// mode is Optional: mode bits used to set permissions on this file. + /// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + /// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + /// If not specified, the volume defaultMode will be used. + /// This might be in conflict with other options that affect the file + /// mode, like fsGroup, and the result can be other mode bits set. #[serde(default, skip_serializing_if = "Option::is_none")] pub mode: Option, - /// path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + /// path is the relative path of the file to map the key to. + /// May not be an absolute path. + /// May not contain the path element '..'. + /// May not start with the string '..'. pub path: String, } /// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesStorageos { - /// fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is the filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, - /// readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + /// readOnly defaults to false (read/write). ReadOnly here will force + /// the ReadOnly setting in VolumeMounts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readOnly")] pub read_only: Option, - /// secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + /// secretRef specifies the secret to use for obtaining the StorageOS API + /// credentials. If not specified, default values will be attempted. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secretRef")] pub secret_ref: Option, - /// volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + /// volumeName is the human-readable name of the StorageOS volume. Volume + /// names are only unique within a namespace. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeName")] pub volume_name: Option, - /// volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + /// volumeNamespace specifies the scope of the volume within StorageOS. If no + /// namespace is specified then the Pod's namespace will be used. This allows the + /// Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + /// Set VolumeName to any name to override the default behaviour. 
+ /// Set to "default" if you are not using namespaces within StorageOS. + /// Namespaces that do not pre-exist within StorageOS will be created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeNamespace")] pub volume_namespace: Option, } -/// secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. +/// secretRef specifies the secret to use for obtaining the StorageOS API +/// credentials. If not specified, default values will be attempted. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesStorageosSecretRef { - /// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid? + /// Name of the referent. + /// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + /// TODO: Add other useful fields. apiVersion, kind, uid? #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } @@ -4317,7 +6649,9 @@ pub struct WorkloadPodSetsTemplateSpecVolumesStorageosSecretRef { /// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadPodSetsTemplateSpecVolumesVsphereVolume { - /// fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + /// fsType is filesystem type to mount. + /// Must be a filesystem type supported by the host operating system. + /// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "fsType")] pub fs_type: Option, /// storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. @@ -4345,23 +6679,40 @@ pub enum WorkloadPriorityClassSource { /// WorkloadStatus defines the observed state of Workload #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadStatus { - /// admission holds the parameters of the admission of the workload by a ClusterQueue. admission can be set back to null, but its fields cannot be changed once set. + /// admission holds the parameters of the admission of the workload by a + /// ClusterQueue. admission can be set back to null, but its fields cannot be + /// changed once set. #[serde(default, skip_serializing_if = "Option::is_none")] pub admission: Option, /// admissionChecks list all the admission checks required by the workload and the current status #[serde(default, skip_serializing_if = "Option::is_none", rename = "admissionChecks")] pub admission_checks: Option>, - /// conditions hold the latest available observations of the Workload current state. - /// The type of the condition could be: - /// - Admitted: the Workload was admitted through a ClusterQueue. - Finished: the associated workload finished running (failed or succeeded). - PodsReady: at least `.spec.podSets[*].count` Pods are ready or have succeeded. + /// conditions hold the latest available observations of the Workload + /// current state. + /// + /// + /// The type of the condition could be: + /// + /// + /// - Admitted: the Workload was admitted through a ClusterQueue. + /// - Finished: the associated workload finished running (failed or succeeded). 
+ /// - PodsReady: at least `.spec.podSets[*].count` Pods are ready or have + /// succeeded. #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// reclaimablePods keeps track of the number pods within a podset for which the resource reservation is no longer needed. + /// reclaimablePods keeps track of the number pods within a podset for which + /// the resource reservation is no longer needed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "reclaimablePods")] pub reclaimable_pods: Option>, + /// requeueState holds the re-queue state + /// when a workload meets Eviction with PodsReadyTimeout reason. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "requeueState")] + pub requeue_state: Option, } -/// admission holds the parameters of the admission of the workload by a ClusterQueue. admission can be set back to null, but its fields cannot be changed once set. +/// admission holds the parameters of the admission of the workload by a +/// ClusterQueue. admission can be set back to null, but its fields cannot be +/// changed once set. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadStatusAdmission { /// clusterQueue is the name of the ClusterQueue that admitted this workload. @@ -4374,7 +6725,10 @@ pub struct WorkloadStatusAdmission { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadStatusAdmissionPodSetAssignments { - /// count is the number of pods taken into account at admission time. This field will not change in case of quota reclaim. Value could be missing for Workloads created before this field was added, in that case spec.podSets[*].count value will be used. + /// count is the number of pods taken into account at admission time. + /// This field will not change in case of quota reclaim. + /// Value could be missing for Workloads created before this field was added, + /// in that case spec.podSets[*].count value will be used. #[serde(default, skip_serializing_if = "Option::is_none")] pub count: Option, /// Flavors are the flavors assigned to the workload for each resource. @@ -4382,18 +6736,24 @@ pub struct WorkloadStatusAdmissionPodSetAssignments { pub flavors: Option>, /// Name is the name of the podSet. It should match one of the names in .spec.podSets. pub name: String, - /// resourceUsage keeps track of the total resources all the pods in the podset need to run. - /// Beside what is provided in podSet's specs, this calculation takes into account the LimitRange defaults and RuntimeClass overheads at the moment of admission. This field will not change in case of quota reclaim. + /// resourceUsage keeps track of the total resources all the pods in the podset need to run. + /// + /// + /// Beside what is provided in podSet's specs, this calculation takes into account + /// the LimitRange defaults and RuntimeClass overheads at the moment of admission. + /// This field will not change in case of quota reclaim. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceUsage")] pub resource_usage: Option>, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WorkloadStatusAdmissionChecks { - /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + /// lastTransitionTime is the last time the condition transitioned from one status to another. 
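One of the condition types listed above, laid out with the metav1.Condition-style fields these structs model; the reason, message, and timestamp are purely illustrative values:

fn main() {
    let condition = serde_json::json!({
        "type": "Admitted",
        "status": "True",
        "reason": "Admitted",                        // illustrative
        "message": "The workload is admitted",       // illustrative
        "lastTransitionTime": "2024-02-16T10:07:44Z" // RFC 3339 timestamp
    });
    println!("{}", serde_json::to_string_pretty(&condition).unwrap());
}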
+ /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
#[serde(rename = "lastTransitionTime")]
pub last_transition_time: String,
- /// message is a human readable message indicating details about the transition. This may be an empty string.
+ /// message is a human readable message indicating details about the transition.
+ /// This may be an empty string.
pub message: String,
/// name identifies the admission check.
pub name: String,
@@ -4403,7 +6763,10 @@ pub struct WorkloadStatusAdmissionChecks {
pub state: WorkloadStatusAdmissionChecksState,
}
-/// PodSetUpdate contains a list of pod set modifications suggested by AdmissionChecks. The modifications should be additive only - modifications of already existing keys or having the same key provided by multiple AdmissionChecks is not allowed and will result in failure during workload admission.
+/// PodSetUpdate contains a list of pod set modifications suggested by AdmissionChecks.
+/// The modifications should be additive only - modifications of already existing keys
+/// or having the same key provided by multiple AdmissionChecks is not allowed and will
+/// result in failure during workload admission.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct WorkloadStatusAdmissionChecksPodSetUpdates {
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -4418,22 +6781,32 @@ pub struct WorkloadStatusAdmissionChecksPodSetUpdates {
pub tolerations: Option>,
}
-/// The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+/// The pod this Toleration is attached to tolerates any taint that matches
+/// the triple using the matching operator .
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct WorkloadStatusAdmissionChecksPodSetUpdatesTolerations {
- /// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ /// Effect indicates the taint effect to match. Empty means match all taint effects.
+ /// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub effect: Option,
- /// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ /// Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ /// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key: Option,
- /// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ /// Operator represents a key's relationship to the value.
+ /// Valid operators are Exists and Equal. Defaults to Equal.
+ /// Exists is equivalent to wildcard for value, so that a pod can
+ /// tolerate all taints of a particular category.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operator: Option,
- /// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ /// TolerationSeconds represents the period of time the toleration (which must be
+ /// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ /// it is not set, which means tolerate the taint forever (do not evict). Zero and
+ /// negative values will be treated as 0 (evict immediately) by the system.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "tolerationSeconds")]
pub toleration_seconds: Option,
- /// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ /// Value is the taint value the toleration matches to.
+ /// If the operator is Exists, the value should be empty, otherwise just a regular string.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option,
}
@@ -4446,31 +6819,71 @@ pub enum WorkloadStatusAdmissionChecksState {
Rejected,
}
-/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example,
-/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-/// // other fields }
+/// Condition contains details for one aspect of the current state of this API Resource.
+/// ---
+/// This struct is intended for direct use as an array at the field path .status.conditions. For example,
+///
+///
+/// type FooStatus struct{
+/// // Represents the observations of a foo's current state.
+/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+/// // +patchMergeKey=type
+/// // +patchStrategy=merge
+/// // +listType=map
+/// // +listMapKey=type
+/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+///
+///
+/// // other fields
+/// }
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct WorkloadStatusConditions {
- /// lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ /// lastTransitionTime is the last time the condition transitioned from one status to another.
+ /// This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
#[serde(rename = "lastTransitionTime")]
pub last_transition_time: String,
- /// message is a human readable message indicating details about the transition. This may be an empty string.
+ /// message is a human readable message indicating details about the transition.
+ /// This may be an empty string.
pub message: String,
- /// observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+ /// observedGeneration represents the .metadata.generation that the condition was set based upon.
+ /// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ /// with respect to the current state of the instance.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")]
pub observed_generation: Option,
- /// reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+ /// reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ /// Producers of specific condition types may define expected values and meanings for this field,
+ /// and whether the values are considered a guaranteed API.
+ /// The value should be a CamelCase string.
+ /// This field may not be empty.
pub reason: String,
/// status of the condition, one of True, False, Unknown.
pub status: WorkloadStatusConditionsStatus,
- /// type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ /// type of condition in CamelCase or in foo.example.com/CamelCase.
+ /// ---
+ /// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ /// useful (see .node.status.conditions), the ability to deconflict is important.
+ /// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
#[serde(rename = "type")]
pub r#type: String,
}
-/// Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example,
-/// type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-/// // other fields }
+/// Condition contains details for one aspect of the current state of this API Resource.
+/// ---
+/// This struct is intended for direct use as an array at the field path .status.conditions. For example,
+///
+///
+/// type FooStatus struct{
+/// // Represents the observations of a foo's current state.
+/// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+/// // +patchMergeKey=type
+/// // +patchStrategy=merge
+/// // +listType=map
+/// // +listMapKey=type
+/// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+///
+///
+/// // other fields
+/// }
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum WorkloadStatusConditionsStatus {
True,
@@ -4486,3 +6899,19 @@ pub struct WorkloadStatusReclaimablePods {
pub name: String,
}
+/// requeueState holds the re-queue state
+/// when a workload meets Eviction with PodsReadyTimeout reason.
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+pub struct WorkloadStatusRequeueState {
+ /// count records the number of times a workload has been re-queued
+ /// When a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),
+ /// this count would be reset to null.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub count: Option,
+ /// requeueAt records the time when a workload will be re-queued.
+ /// When a deactivated (`.spec.activate`=`false`) workload is reactivated (`.spec.activate`=`true`),
+ /// this time would be reset to null.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "requeueAt")]
+ pub requeue_at: Option,
+}
+
diff --git a/kube-custom-resources-rs/src/lib.rs b/kube-custom-resources-rs/src/lib.rs
index ad335774a..782b3d5f7 100644
--- a/kube-custom-resources-rs/src/lib.rs
+++ b/kube-custom-resources-rs/src/lib.rs
@@ -34,6 +34,10 @@ apiVersion `actions.summerwind.dev/v1alpha1`:
## addons_cluster_x_k8s_io
+apiVersion `addons.cluster.x-k8s.io/v1alpha3`:
+- `ClusterResourceSetBinding`
+- `ClusterResourceSet`
+
apiVersion `addons.cluster.x-k8s.io/v1alpha4`:
- `ClusterResourceSet`
@@ -599,6 +603,14 @@ apiVersion `cluster.ipfs.io/v1alpha1`:
## cluster_x_k8s_io
+apiVersion `cluster.x-k8s.io/v1alpha3`:
+- `Cluster`
+- `MachineDeployment`
+- `MachineHealthCheck`
+- `MachinePool`
+- `Machine`
+- `MachineSet`
+
apiVersion `cluster.x-k8s.io/v1alpha4`:
- `ClusterClass`
- `Cluster`
@@ -1213,6 +1225,25 @@ apiVersion `infrastructure.cluster.x-k8s.io/v1alpha1`:
- `KubevirtMachine`
- `KubevirtMachineTemplate`
+apiVersion `infrastructure.cluster.x-k8s.io/v1alpha3`:
+- `VSphereClusterIdentity`
+- `VSphereCluster`
+- `VSphereDeploymentZone`
+- `VSphereFailureDomain`
+- `VSphereMachine`
+- `VSphereMachineTemplate`
+- `VSphereVM`
+
+apiVersion `infrastructure.cluster.x-k8s.io/v1alpha4`:
+- `VSphereClusterIdentity`
+- `VSphereCluster`
+- `VSphereClusterTemplate`
+- `VSphereDeploymentZone`
+- `VSphereFailureDomain`
+- `VSphereMachine`
+- `VSphereMachineTemplate`
+- `VSphereVM`
+
apiVersion `infrastructure.cluster.x-k8s.io/v1beta1`:
- `IBMPowerVSCluster`
- `IBMPowerVSClusterTemplate`
diff --git a/kube-custom-resources-rs/src/loki_grafana_com/v1/lokistacks.rs b/kube-custom-resources-rs/src/loki_grafana_com/v1/lokistacks.rs
index 1906237cf..6440af44c 100644
--- a/kube-custom-resources-rs/src/loki_grafana_com/v1/lokistacks.rs
+++ b/kube-custom-resources-rs/src/loki_grafana_com/v1/lokistacks.rs
@@ -2233,11 +2233,25 @@ pub enum LokiStackStatusConditionsStatus {
/// Storage provides summary of all changes that have occurred to the storage configuration.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct LokiStackStatusStorage {
+ /// CredentialMode contains the authentication mode used for accessing the object storage.
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "credentialMode")]
+ pub credential_mode: Option,
/// Schemas is a list of schemas which have been applied to the LokiStack.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub schemas: Option>,
}
+/// Storage provides summary of all changes that have occurred to the storage configuration.
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+pub enum LokiStackStatusStorageCredentialMode {
+ #[serde(rename = "static")]
+ Static,
+ #[serde(rename = "token")]
+ Token,
+ #[serde(rename = "managed")]
+ Managed,
+}
+
/// ObjectStorageSchema defines the requirements needed to configure a new storage schema.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct LokiStackStatusStorageSchemas {
diff --git a/kube-custom-resources-rs/src/machineconfiguration_openshift_io/v1/controllerconfigs.rs b/kube-custom-resources-rs/src/machineconfiguration_openshift_io/v1/controllerconfigs.rs
index b71bf6237..76bd9c31f 100644
--- a/kube-custom-resources-rs/src/machineconfiguration_openshift_io/v1/controllerconfigs.rs
+++ b/kube-custom-resources-rs/src/machineconfiguration_openshift_io/v1/controllerconfigs.rs
@@ -614,6 +614,10 @@ pub struct ControllerConfigInfraSpecPlatformSpecVsphereFailureDomainsTopology {
/// resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "resourcePool")]
pub resource_pool: Option,
+ /// template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.
+ /// When omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea.
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub template: Option,
}
/// nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.
diff --git a/kube-custom-resources-rs/src/mariadb_mmontes_io/v1alpha1/mariadbs.rs b/kube-custom-resources-rs/src/mariadb_mmontes_io/v1alpha1/mariadbs.rs
index 64a12cb78..b51946fc9 100644
--- a/kube-custom-resources-rs/src/mariadb_mmontes_io/v1alpha1/mariadbs.rs
+++ b/kube-custom-resources-rs/src/mariadb_mmontes_io/v1alpha1/mariadbs.rs
@@ -62,10 +62,10 @@ pub struct MariaDBSpec {
/// LivenessProbe to be used in the Container.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")]
pub liveness_probe: Option,
- /// MaxScale is the MaxScale specification that defines the MaxScale instance that will be used with MariaDB. When enabling this field, MaxScaleRef is automatically set.
+ /// MaxScale is the MaxScale specification that defines the MaxScale resource to be used with the current MariaDB. When enabling this field, MaxScaleRef is automatically set.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "maxScale")]
pub max_scale: Option,
- /// MaxScaleRef is a reference to a MaxScale instance that is forwarding the traffic to the current MariaDB instance. Providing this field implies delegating high availability tasks such as primary failover to MaxScale.
+ /// MaxScaleRef is a reference to a MaxScale resource to be used with the current MariaDB. Providing this field implies delegating high availability tasks such as primary failover to MaxScale.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "maxScaleRef")]
pub max_scale_ref: Option,
/// Metrics configures metrics and how to scrape them.
@@ -1841,7 +1841,7 @@ pub struct MariaDBEnvFromSecretRef {
/// Replication configures high availability via Galera.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBGalera {
- /// GaleraAgent is a sidecar agent that co-operates with mariadb-operator. More info: https://github.com/mariadb-operator/agent.
+ /// GaleraAgent is a sidecar agent that co-operates with mariadb-operator.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub agent: Option,
/// Enabled is a flag to enable Galera.
@@ -1867,7 +1867,7 @@ pub struct MariaDBGalera {
pub volume_claim_template: Option,
}
-/// GaleraAgent is a sidecar agent that co-operates with mariadb-operator. More info: https://github.com/mariadb-operator/agent.
+/// GaleraAgent is a sidecar agent that co-operates with mariadb-operator.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBGaleraAgent {
/// Args to be used in the Container.
@@ -2030,7 +2030,7 @@ pub struct MariaDBGaleraAgentEnvFromSecretRef {
pub optional: Option,
}
-/// GaleraAgent is a sidecar agent that co-operates with mariadb-operator. More info: https://github.com/mariadb-operator/agent.
+/// GaleraAgent is a sidecar agent that co-operates with mariadb-operator.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum MariaDBGaleraAgentImagePullPolicy {
Always,
@@ -3612,7 +3612,7 @@ pub struct MariaDBLivenessProbeTcpSocket {
pub port: IntOrString,
}
-/// MaxScale is the MaxScale specification that defines the MaxScale instance that will be used with MariaDB. When enabling this field, MaxScaleRef is automatically set.
+/// MaxScale is the MaxScale specification that defines the MaxScale resource to be used with the current MariaDB. When enabling this field, MaxScaleRef is automatically set.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBMaxScale {
/// Admin configures the admin REST API and GUI.
@@ -3636,7 +3636,7 @@ pub struct MariaDBMaxScale {
/// Connection provides a template to define the Connection for MaxScale.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub connection: Option,
- /// Enabled is a flag to enable Metrics
+ /// Enabled is a flag to enable a MaxScale instance to be used with the current MariaDB.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option,
/// Env represents the environment variables to be injected in a container.
@@ -3663,7 +3663,7 @@ pub struct MariaDBMaxScale {
/// LivenessProbe to be used in the Container.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")]
pub liveness_probe: Option,
- /// Monitor monitors MariaDB server instances. It is required if 'spec.mariaDbRef' is not provided.
+ /// Monitor monitors MariaDB server instances.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub monitor: Option,
/// NodeSelector to be used in the Pod.
@@ -3684,7 +3684,7 @@ pub struct MariaDBMaxScale {
/// Replicas indicates the number of desired instances.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub replicas: Option,
- /// RequeueInterval is used to perform requeue reconcilizations. If not defined, it defaults to 10s.
+ /// RequeueInterval is used to perform requeue reconcilizations.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "requeueInterval")]
pub requeue_interval: Option,
/// Resouces describes the compute resource requirements.
@@ -3696,7 +3696,7 @@ pub struct MariaDBMaxScale {
/// ServiceAccountName is the name of the ServiceAccount to be used by the Pods.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "serviceAccountName")]
pub service_account_name: Option,
- /// Services define how the traffic is forwarded to the MariaDB servers. It is defaulted if not provided.
+ /// Services define how the traffic is forwarded to the MariaDB servers.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub services: Option>,
/// SidecarContainers to be used in the Pod.
@@ -4257,7 +4257,7 @@ pub struct MariaDBMaxScaleAuthSyncPasswordSecretKeyRef {
/// Config defines the MaxScale configuration.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBMaxScaleConfig {
- /// Params is a key value pair of parameters to be used in the MaxScale static configuration file.
+ /// Params is a key value pair of parameters to be used in the MaxScale static configuration file. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#global-settings.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub params: Option>,
/// Sync defines how to replicate configuration across MaxScale replicas. It is defaulted when HA is enabled.
@@ -4572,7 +4572,7 @@ pub struct MariaDBMaxScaleEnvFromSecretRef {
pub optional: Option,
}
-/// MaxScale is the MaxScale specification that defines the MaxScale instance that will be used with MariaDB. When enabling this field, MaxScaleRef is automatically set.
+/// MaxScale is the MaxScale specification that defines the MaxScale resource to be used with the current MariaDB. When enabling this field, MaxScaleRef is automatically set.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum MariaDBMaxScaleImagePullPolicy {
Always,
@@ -5203,7 +5203,7 @@ pub struct MariaDBMaxScaleLivenessProbeTcpSocket {
pub port: IntOrString,
}
-/// Monitor monitors MariaDB server instances. It is required if 'spec.mariaDbRef' is not provided.
+/// Monitor monitors MariaDB server instances.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBMaxScaleMonitor {
/// CooperativeMonitoring enables coordination between multiple MaxScale instances running monitors. It is defaulted when HA is enabled.
@@ -5218,7 +5218,7 @@ pub struct MariaDBMaxScaleMonitor {
/// Name is the identifier of the monitor. It is defaulted if not provided.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option,
- /// Params defines extra parameters to pass to the monitor.
+ /// Params defines extra parameters to pass to the monitor. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-common-monitor-parameters/. Monitor specific parameter are also suported: https://mariadb.com/kb/en/mariadb-maxscale-2308-galera-monitor/#galera-monitor-optional-parameters. https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-monitor/#configuration.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub params: Option>,
/// Suspend indicates whether the current resource should be suspended or not. Feature flag --feature-maxscale-suspend is required in the controller to enable this.
@@ -5226,7 +5226,7 @@ pub struct MariaDBMaxScaleMonitor {
pub suspend: Option,
}
-/// Monitor monitors MariaDB server instances. It is required if 'spec.mariaDbRef' is not provided.
+/// Monitor monitors MariaDB server instances.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum MariaDBMaxScaleMonitorCooperativeMonitoring {
#[serde(rename = "majority_of_all")]
MajorityOfAll,
@@ -5555,7 +5555,7 @@ pub struct MariaDBMaxScaleServices {
pub listener: MariaDBMaxScaleServicesListener,
/// Name is the identifier of the MaxScale service.
pub name: String,
- /// Params defines extra parameters to pass to the monitor.
+ /// Params defines extra parameters to pass to the monitor. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#service_1. Router specific parameter are also suported: https://mariadb.com/kb/en/mariadb-maxscale-2308-readwritesplit/#configuration. https://mariadb.com/kb/en/mariadb-maxscale-2308-readconnroute/#configuration.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub params: Option>,
/// Router is the type of router to use.
@@ -5571,7 +5571,7 @@ pub struct MariaDBMaxScaleServicesListener {
/// Name is the identifier of the listener. It is defaulted if not provided
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option,
- /// Params defines extra parameters to pass to the listener.
+ /// Params defines extra parameters to pass to the listener. Any parameter supported by MaxScale may be specified here. See reference: https://mariadb.com/kb/en/mariadb-maxscale-2308-mariadb-maxscale-configuration-guide/#listener_1.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub params: Option>,
/// Port is the network port where the MaxScale server will listen.
@@ -7174,7 +7174,7 @@ pub struct MariaDBMaxScaleVolumesVsphereVolume {
pub volume_path: String,
}
-/// MaxScaleRef is a reference to a MaxScale instance that is forwarding the traffic to the current MariaDB instance. Providing this field implies delegating high availability tasks such as primary failover to MaxScale.
+/// MaxScaleRef is a reference to a MaxScale resource to be used with the current MariaDB. Providing this field implies delegating high availability tasks such as primary failover to MaxScale.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct MariaDBMaxScaleRef {
/// API version of the referent.
diff --git a/kube-custom-resources-rs/src/monitoring_coreos_com/v1/servicemonitors.rs b/kube-custom-resources-rs/src/monitoring_coreos_com/v1/servicemonitors.rs
index fa5223e73..c29b049a2 100644
--- a/kube-custom-resources-rs/src/monitoring_coreos_com/v1/servicemonitors.rs
+++ b/kube-custom-resources-rs/src/monitoring_coreos_com/v1/servicemonitors.rs
@@ -151,8 +151,7 @@ pub struct ServiceMonitorEndpoints {
/// If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "scrapeTimeout")]
pub scrape_timeout: Option,
- /// Name or number of the target port of the `Pod` object behind the Service, the port must be specified with container port property.
- /// Deprecated: use `port` instead.
+ /// Name or number of the target port of the `Pod` object behind the Service. The port must be specified with the container's port property.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "targetPort")]
pub target_port: Option,
/// TLS configuration to use when scraping the target.
diff --git a/kube-custom-resources-rs/src/mq_services_k8s_aws/v1alpha1/brokers.rs b/kube-custom-resources-rs/src/mq_services_k8s_aws/v1alpha1/brokers.rs
index ead5d66be..7b4249048 100644
--- a/kube-custom-resources-rs/src/mq_services_k8s_aws/v1alpha1/brokers.rs
+++ b/kube-custom-resources-rs/src/mq_services_k8s_aws/v1alpha1/brokers.rs
@@ -17,16 +17,20 @@ pub struct BrokerSpec {
pub authentication_strategy: Option,
#[serde(rename = "autoMinorVersionUpgrade")]
pub auto_minor_version_upgrade: bool,
- /// A list of information about the configuration.
- /// Does not apply to RabbitMQ brokers.
+ /// A list of information about the configuration.
+ ///
+ ///
+ /// Does not apply to RabbitMQ brokers.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub configuration: Option,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "creatorRequestID")]
pub creator_request_id: Option,
#[serde(rename = "deploymentMode")]
pub deployment_mode: String,
- /// Does not apply to RabbitMQ brokers.
- /// Encryption options for the broker.
+ /// Does not apply to RabbitMQ brokers.
+ ///
+ ///
+ /// Encryption options for the broker.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionOptions")]
pub encryption_options: Option,
#[serde(rename = "engineType")]
@@ -35,14 +39,18 @@ pub struct BrokerSpec {
pub engine_version: String,
#[serde(rename = "hostInstanceType")]
pub host_instance_type: String,
- /// Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.
- /// Does not apply to RabbitMQ brokers.
+ /// Optional. The metadata of the LDAP server used to authenticate and authorize
+ /// connections to the broker.
+ ///
+ ///
+ /// Does not apply to RabbitMQ brokers.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "ldapServerMetadata")]
pub ldap_server_metadata: Option,
/// The list of information about logs to be enabled for the specified broker.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub logs: Option,
- /// The scheduled time period relative to UTC during which Amazon MQ begins to apply pending updates or patches to the broker.
+ /// The scheduled time period relative to UTC during which Amazon MQ begins to
+ /// apply pending updates or patches to the broker.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "maintenanceWindowStartTime")]
pub maintenance_window_start_time: Option,
pub name: String,
@@ -63,8 +71,10 @@ pub struct BrokerSpec {
pub users: Vec,
}
-/// A list of information about the configuration.
-/// Does not apply to RabbitMQ brokers.
+/// A list of information about the configuration.
+///
+///
+/// Does not apply to RabbitMQ brokers.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option,
@@ -73,8 +83,10 @@ pub struct BrokerConfiguration {
pub revision: Option,
}
-/// Does not apply to RabbitMQ brokers.
-/// Encryption options for the broker.
+/// Does not apply to RabbitMQ brokers.
+///
+///
+/// Encryption options for the broker.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerEncryptionOptions {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")]
pub kms_key_id: Option,
@@ -83,8 +95,11 @@ pub struct BrokerEncryptionOptions {
pub use_aws_owned_key: Option,
}
-/// Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker.
-/// Does not apply to RabbitMQ brokers.
+/// Optional. The metadata of the LDAP server used to authenticate and authorize
+/// connections to the broker.
+///
+///
+/// Does not apply to RabbitMQ brokers.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerLdapServerMetadata {
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -120,7 +135,8 @@ pub struct BrokerLogs {
pub general: Option,
}
-/// The scheduled time period relative to UTC during which Amazon MQ begins to apply pending updates or patches to the broker.
+/// The scheduled time period relative to UTC during which Amazon MQ begins to
+/// apply pending updates or patches to the broker.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerMaintenanceWindowStartTime {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "dayOfWeek")]
@@ -131,53 +147,74 @@ pub struct BrokerMaintenanceWindowStartTime {
pub time_zone: Option,
}
-/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef:
-/// from: name: my-api
+/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference
+/// type to provide more user friendly syntax for references using 'from' field
+/// Ex:
+/// APIIDRef:
+///
+///
+/// from:
+/// name: my-api
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerSecurityGroupRefs {
- /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+ /// AWSResourceReference provides all the values necessary to reference another
+ /// k8s resource for finding the identifier(Id/ARN/Name)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub from: Option,
}
-/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+/// AWSResourceReference provides all the values necessary to reference another
+/// k8s resource for finding the identifier(Id/ARN/Name)
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerSecurityGroupRefsFrom {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option,
}
-/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef:
-/// from: name: my-api
+/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference
+/// type to provide more user friendly syntax for references using 'from' field
+/// Ex:
+/// APIIDRef:
+///
+///
+/// from:
+/// name: my-api
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerSubnetRefs {
- /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+ /// AWSResourceReference provides all the values necessary to reference another
+ /// k8s resource for finding the identifier(Id/ARN/Name)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub from: Option,
}
-/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+/// AWSResourceReference provides all the values necessary to reference another
+/// k8s resource for finding the identifier(Id/ARN/Name)
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerSubnetRefsFrom {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option,
}
-/// A user associated with the broker. For RabbitMQ brokers, one and only one administrative user is accepted and created when a broker is first provisioned. All subsequent broker users are created by making RabbitMQ API calls directly to brokers or via the RabbitMQ web console.
+/// A user associated with the broker. For RabbitMQ brokers, one and only one
+/// administrative user is accepted and created when a broker is first provisioned.
+/// All subsequent broker users are created by making RabbitMQ API calls directly
+/// to brokers or via the RabbitMQ web console.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerUsers {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "consoleAccess")]
pub console_access: Option,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub groups: Option>,
- /// SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret
+ /// SecretKeyReference combines a k8s corev1.SecretReference with a
+ /// specific key within the referred-to Secret
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option,
}
-/// SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret
+/// SecretKeyReference combines a k8s corev1.SecretReference with a
+/// specific key within the referred-to Secret
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerUsersPassword {
/// Key is the key within the secret
@@ -193,7 +230,9 @@ pub struct BrokerUsersPassword {
/// BrokerStatus defines the observed state of Broker
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerStatus {
- /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource
+ /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member
+ /// that is used to contain resource sync state, account ownership,
+ /// constructed ARN for the resource
#[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")]
pub ack_resource_metadata: Option,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "brokerID")]
pub broker_id: Option,
@@ -202,18 +241,31 @@ pub struct BrokerStatus {
pub broker_instances: Option>,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "brokerState")]
pub broker_state: Option,
- /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource
+ /// All CRS managed by ACK have a common `Status.Conditions` member that
+ /// contains a collection of `ackv1alpha1.Condition` objects that describe
+ /// the various terminal states of the CR and its backend AWS service API
+ /// resource
#[serde(default, skip_serializing_if = "Option::is_none")]
pub conditions: Option>,
}
-/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource
+/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member
+/// that is used to contain resource sync state, account ownership,
+/// constructed ARN for the resource
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerStatusAckResourceMetadata {
- /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270
+ /// ARN is the Amazon Resource Name for the resource. This is a
+ /// globally-unique identifier and is set only by the ACK service controller
+ /// once the controller has orchestrated the creation of the resource OR
+ /// when it has verified that an "adopted" resource (a resource where the
+ /// ARN annotation was set by the Kubernetes user on the CR) exists and
+ /// matches the supplied CR's Spec field values.
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse
+ /// https://github.com/aws/aws-controllers-k8s/issues/270
#[serde(default, skip_serializing_if = "Option::is_none")]
pub arn: Option,
- /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource.
+ /// OwnerAccountID is the AWS Account ID of the account that owns the
+ /// backend AWS service API resource.
#[serde(rename = "ownerAccountID")]
pub owner_account_id: String,
/// Region is the AWS region in which the resource exists or will exist.
@@ -231,7 +283,9 @@ pub struct BrokerStatusBrokerInstances {
pub ip_address: Option,
}
-/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource
+/// Condition is the common struct used by all CRDs managed by ACK service
+/// controllers to indicate terminal states of the CR and its backend AWS
+/// service API resource
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct BrokerStatusConditions {
/// Last time the condition transitioned from one status to another.
diff --git a/kube-custom-resources-rs/src/opensearchservice_services_k8s_aws/v1alpha1/domains.rs b/kube-custom-resources-rs/src/opensearchservice_services_k8s_aws/v1alpha1/domains.rs
index e471b94a1..25b0cf63a 100644
--- a/kube-custom-resources-rs/src/opensearchservice_services_k8s_aws/v1alpha1/domains.rs
+++ b/kube-custom-resources-rs/src/opensearchservice_services_k8s_aws/v1alpha1/domains.rs
@@ -16,7 +16,10 @@ pub struct DomainSpec {
/// IAM access policy as a JSON-formatted string.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "accessPolicies")]
pub access_policies: Option,
- /// Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true. See Advanced cluster parameters (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomain-configure-advanced-options) for more information.
+ /// Option to allow references to indices in an HTTP request body. Must be false
+ /// when configuring access to individual sub-resources. By default, the value
+ /// is true. See Advanced cluster parameters (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomain-configure-advanced-options)
+ /// for more information.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "advancedOptions")]
pub advanced_options: Option>,
/// Specifies advanced security options.
@@ -25,28 +28,39 @@ pub struct DomainSpec {
/// Specifies Auto-Tune options.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "autoTuneOptions")]
pub auto_tune_options: Option,
- /// Configuration options for a domain. Specifies the instance type and number of instances in the domain.
+ /// Configuration options for a domain. Specifies the instance type and number
+ /// of instances in the domain.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterConfig")]
pub cluster_config: Option,
- /// Options to specify the Cognito user and identity pools for OpenSearch Dashboards authentication. For more information, see Configuring Amazon Cognito authentication for OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html).
+ /// Options to specify the Cognito user and identity pools for OpenSearch Dashboards
+ /// authentication. For more information, see Configuring Amazon Cognito authentication
+ /// for OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html).
#[serde(default, skip_serializing_if = "Option::is_none", rename = "cognitoOptions")]
pub cognito_options: Option,
/// Options to specify configurations that will be applied to the domain endpoint.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "domainEndpointOptions")]
pub domain_endpoint_options: Option,
- /// Options to enable, disable, and specify the type and size of EBS storage volumes.
+ /// Options to enable, disable, and specify the type and size of EBS storage
+ /// volumes.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "ebsOptions")]
pub ebs_options: Option,
/// Options for encryption of data at rest.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionAtRestOptions")]
pub encryption_at_rest_options: Option,
- /// String of format Elasticsearch_X.Y or OpenSearch_X.Y to specify the engine version for the Amazon OpenSearch Service domain. For example, "OpenSearch_1.0" or "Elasticsearch_7.9". For more information, see Creating and managing Amazon OpenSearch Service domains (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains).
+ /// String of format Elasticsearch_X.Y or OpenSearch_X.Y to specify the engine
+ /// version for the Amazon OpenSearch Service domain. For example, "OpenSearch_1.0"
+ /// or "Elasticsearch_7.9". For more information, see Creating and managing Amazon
+ /// OpenSearch Service domains (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains).
#[serde(default, skip_serializing_if = "Option::is_none", rename = "engineVersion")]
pub engine_version: Option,
- /// Map of LogType and LogPublishingOption, each containing options to publish a given type of OpenSearch log.
+ /// Map of LogType and LogPublishingOption, each containing options to publish
+ /// a given type of OpenSearch log.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "logPublishingOptions")]
pub log_publishing_options: Option>,
- /// The name of the Amazon OpenSearch Service domain you're creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a lowercase letter and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
+ /// The name of the Amazon OpenSearch Service domain you're creating. Domain
+ /// names are unique across the domains owned by an account within an AWS region.
+ /// Domain names must start with a lowercase letter and can contain the following
+ /// characters: a-z (lowercase), 0-9, and - (hyphen).
pub name: String,
/// Node-to-node encryption options.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "nodeToNodeEncryptionOptions")]
pub node_to_node_encryption_options: Option,
/// A list of Tag added during domain creation.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option>,
- /// Options to specify the subnets and security groups for a VPC endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html).
+ /// Options to specify the subnets and security groups for a VPC endpoint. For
+ /// more information, see Launching your Amazon OpenSearch Service domains using
+ /// a VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html).
#[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcOptions")]
pub vpc_options: Option,
}
@@ -79,17 +95,21 @@ pub struct DomainAdvancedSecurityOptions {
/// Credentials for the master user: username and password, ARN, or both.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct DomainAdvancedSecurityOptionsMasterUserOptions {
- /// The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS Identity and Access Management for more information.
+ /// The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities
+ /// (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS
+ /// Identity and Access Management for more information.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserARN")]
pub master_user_arn: Option,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserName")]
pub master_user_name: Option,
- /// SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret
+ /// SecretKeyReference combines a k8s corev1.SecretReference with a
+ /// specific key within the referred-to Secret
#[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserPassword")]
pub master_user_password: Option,
}
-/// SecretKeyReference combines a k8s corev1.SecretReference with a specific key within the referred-to Secret
+/// SecretKeyReference combines a k8s corev1.SecretReference with a
+/// specific key within the referred-to Secret
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct DomainAdvancedSecurityOptionsMasterUserOptionsMasterUserPassword {
/// Key is the key within the secret
@@ -141,30 +161,41 @@ pub struct DomainAutoTuneOptions {
pub maintenance_schedules: Option>,
}
-/// Specifies the Auto-Tune maintenance schedule. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information.
+/// Specifies the Auto-Tune maintenance schedule. See Auto-Tune for Amazon OpenSearch
+/// Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)
+/// for more information.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct DomainAutoTuneOptionsMaintenanceSchedules {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "cronExpressionForRecurrence")]
pub cron_expression_for_recurrence: Option,
- /// The maintenance schedule duration: duration value and duration unit. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information.
+ /// The maintenance schedule duration: duration value and duration unit. See
+ /// Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)
+ /// for more information.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub duration: Option,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "startAt")]
pub start_at: Option,
}
-/// The maintenance schedule duration: duration value and duration unit. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information.
+/// The maintenance schedule duration: duration value and duration unit. See
+/// Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)
+/// for more information.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct DomainAutoTuneOptionsMaintenanceSchedulesDuration {
- /// The unit of a maintenance schedule duration. Valid value is HOUR. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information.
+ /// The unit of a maintenance schedule duration. Valid value is HOUR. See Auto-Tune
+ /// for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html)
+ /// for more information.
#[serde(default, skip_serializing_if = "Option::is_none")] pub unit: Option, - /// Integer to specify the value of a maintenance schedule duration. See Auto-Tune for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) for more information. + /// Integer to specify the value of a maintenance schedule duration. See Auto-Tune + /// for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/auto-tune.html) + /// for more information. #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, } -/// Configuration options for a domain. Specifies the instance type and number of instances in the domain. +/// Configuration options for a domain. Specifies the instance type and number +/// of instances in the domain. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainClusterConfig { /// Specifies the configuration for cold storage options such as enabled @@ -186,7 +217,8 @@ pub struct DomainClusterConfig { pub warm_enabled: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "warmType")] pub warm_type: Option, - /// The zone awareness configuration for the domain cluster, such as the number of availability zones. + /// The zone awareness configuration for the domain cluster, such as the number + /// of availability zones. #[serde(default, skip_serializing_if = "Option::is_none", rename = "zoneAwarenessConfig")] pub zone_awareness_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "zoneAwarenessEnabled")] @@ -200,14 +232,17 @@ pub struct DomainClusterConfigColdStorageOptions { pub enabled: Option, } -/// The zone awareness configuration for the domain cluster, such as the number of availability zones. +/// The zone awareness configuration for the domain cluster, such as the number +/// of availability zones. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainClusterConfigZoneAwarenessConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZoneCount")] pub availability_zone_count: Option, } -/// Options to specify the Cognito user and identity pools for OpenSearch Dashboards authentication. For more information, see Configuring Amazon Cognito authentication for OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html). +/// Options to specify the Cognito user and identity pools for OpenSearch Dashboards +/// authentication. For more information, see Configuring Amazon Cognito authentication +/// for OpenSearch Dashboards (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/cognito-auth.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainCognitoOptions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -225,7 +260,9 @@ pub struct DomainCognitoOptions { pub struct DomainDomainEndpointOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "customEndpoint")] pub custom_endpoint: Option, - /// The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS Identity and Access Management for more information. + /// The Amazon Resource Name (ARN) of the domain. 
See Identifiers for IAM Entities + /// (http://docs.aws.amazon.com/IAM/latest/UserGuide/index.html) in Using AWS + /// Identity and Access Management for more information. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customEndpointCertificateARN")] pub custom_endpoint_certificate_arn: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "customEndpointEnabled")] @@ -236,7 +273,8 @@ pub struct DomainDomainEndpointOptions { pub tls_security_policy: Option, } -/// Options to enable, disable, and specify the type and size of EBS storage volumes. +/// Options to enable, disable, and specify the type and size of EBS storage +/// volumes. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainEbsOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "ebsEnabled")] @@ -247,7 +285,9 @@ pub struct DomainEbsOptions { pub throughput: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeSize")] pub volume_size: Option, - /// The type of EBS volume, standard, gp2, gp3 or io1. See Configuring EBS-based Storage (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/opensearch-createupdatedomains.html#opensearch-createdomain-configure-ebs) for more information. + /// The type of EBS volume, standard, gp2, gp3 or io1. See Configuring EBS-based + /// Storage (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/opensearch-createupdatedomains.html#opensearch-createdomain-configure-ebs) + /// for more information. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeType")] pub volume_type: Option, } @@ -261,7 +301,8 @@ pub struct DomainEncryptionAtRestOptions { pub kms_key_id: Option, } -/// Map of LogType and LogPublishingOption, each containing options to publish a given type of OpenSearch log. +/// Map of LogType and LogPublishingOption, each containing options to publish +/// a given type of OpenSearch log. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainLogPublishingOptions { /// ARN of the Cloudwatch log group to publish logs to. @@ -281,15 +322,19 @@ pub struct DomainNodeToNodeEncryptionOptions { /// A key value pair for a resource tag. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainTags { - /// A string of length from 1 to 128 characters that specifies the key for a tag. Tag keys must be unique for the domain to which they're attached. + /// A string of length from 1 to 128 characters that specifies the key for a + /// tag. Tag keys must be unique for the domain to which they're attached. #[serde(default, skip_serializing_if = "Option::is_none")] pub key: Option, - /// A string of length from 0 to 256 characters that specifies the value for a tag. Tag values can be null and don't have to be unique in a tag set. + /// A string of length from 0 to 256 characters that specifies the value for + /// a tag. Tag values can be null and don't have to be unique in a tag set. #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, } -/// Options to specify the subnets and security groups for a VPC endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html). +/// Options to specify the subnets and security groups for a VPC endpoint. 
For +/// more information, see Launching your Amazon OpenSearch Service domains using +/// a VPC (http://docs.aws.amazon.com/opensearch-service/latest/developerguide/vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainVpcOptions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -301,19 +346,28 @@ pub struct DomainVpcOptions { /// DomainStatus defines the observed state of Domain #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, /// Specifies change details of the domain configuration change. #[serde(default, skip_serializing_if = "Option::is_none", rename = "changeProgressDetails")] pub change_progress_details: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// The domain creation status. True if the creation of a domain is complete. False if domain creation is still in progress. + /// The domain creation status. True if the creation of a domain is complete. + /// False if domain creation is still in progress. #[serde(default, skip_serializing_if = "Option::is_none")] pub created: Option, - /// The domain deletion status. True if a delete request has been received for the domain but resource cleanup is still in progress. False if the domain has not been deleted. Once domain deletion is complete, the status of the domain is no longer returned. + /// The domain deletion status. True if a delete request has been received for + /// the domain but resource cleanup is still in progress. False if the domain + /// has not been deleted. Once domain deletion is complete, the status of the + /// domain is no longer returned. #[serde(default, skip_serializing_if = "Option::is_none")] pub deleted: Option, /// The unique identifier for the specified domain. @@ -322,10 +376,12 @@ pub struct DomainStatus { /// The domain endpoint that you use to submit index and search requests. #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option, - /// Map containing the domain endpoints used to submit index and search requests. Example key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'. + /// Map containing the domain endpoints used to submit index and search requests. + /// Example key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'. #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoints: Option>, - /// The status of the domain configuration. 
True if Amazon OpenSearch Service is processing configuration changes. False if the configuration is active. + /// The status of the domain configuration. True if Amazon OpenSearch Service + /// is processing configuration changes. False if the configuration is active. #[serde(default, skip_serializing_if = "Option::is_none")] pub processing: Option, /// The current status of the domain's service software. @@ -334,18 +390,29 @@ pub struct DomainStatus { /// The status of the SnapshotOptions. #[serde(default, skip_serializing_if = "Option::is_none", rename = "snapshotOptions")] pub snapshot_options: Option, - /// The status of a domain version upgrade. True if Amazon OpenSearch Service is undergoing a version upgrade. False if the configuration is active. + /// The status of a domain version upgrade. True if Amazon OpenSearch Service + /// is undergoing a version upgrade. False if the configuration is active. #[serde(default, skip_serializing_if = "Option::is_none", rename = "upgradeProcessing")] pub upgrade_processing: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
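Editor's note: the OpenSearch Domain structs in the domains.rs hunk above all follow the same serde pattern, where every optional field is skipped when unset and renamed to its camelCase wire form. Below is a minimal, self-contained sketch of that behaviour using a simplified local mirror of DomainEbsOptions; the Option payload types (bool/i64/String) are assumptions, since the generated code elides them here, and only the serde (derive) and serde_json crates are needed.

    use serde::{Deserialize, Serialize};

    // Simplified mirror of DomainEbsOptions from the domains.rs hunk above;
    // the concrete payload types are assumptions, not the generated ones.
    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct DomainEbsOptionsSketch {
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "ebsEnabled")]
        pub ebs_enabled: Option<bool>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub iops: Option<i64>,
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeSize")]
        pub volume_size: Option<i64>,
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeType")]
        pub volume_type: Option<String>,
    }

    fn main() {
        let ebs = DomainEbsOptionsSketch {
            ebs_enabled: Some(true),
            iops: None,
            volume_size: Some(20),
            volume_type: Some("gp3".to_string()),
        };
        // Prints {"ebsEnabled":true,"volumeSize":20,"volumeType":"gp3"};
        // the unset iops field is omitted entirely.
        println!("{}", serde_json::to_string(&ebs).unwrap());
    }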
@@ -361,7 +428,9 @@ pub struct DomainStatusChangeProgressDetails { pub message: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/opentelemetry_io/v1alpha1/opentelemetrycollectors.rs b/kube-custom-resources-rs/src/opentelemetry_io/v1alpha1/opentelemetrycollectors.rs index 1968bd221..d2154a7ce 100644 --- a/kube-custom-resources-rs/src/opentelemetry_io/v1alpha1/opentelemetrycollectors.rs +++ b/kube-custom-resources-rs/src/opentelemetry_io/v1alpha1/opentelemetrycollectors.rs @@ -3002,7 +3002,7 @@ pub struct OpenTelemetryCollectorTargetAllocator { /// If specified, indicates the pod's scheduling constraints #[serde(default, skip_serializing_if = "Option::is_none")] pub affinity: Option, - /// AllocationStrategy determines which strategy the target allocator should use for allocation. The current options are least-weighted and consistent-hashing. The default option is consistent-hashing + /// AllocationStrategy determines which strategy the target allocator should use for allocation. The current options are least-weighted, consistent-hashing and per-node. The default is consistent-hashing. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocationStrategy")] pub allocation_strategy: Option, /// Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not. @@ -3489,6 +3489,8 @@ pub enum OpenTelemetryCollectorTargetAllocatorAllocationStrategy { LeastWeighted, #[serde(rename = "consistent-hashing")] ConsistentHashing, + #[serde(rename = "per-node")] + PerNode, } /// EnvVar represents an environment variable present in a Container. diff --git a/kube-custom-resources-rs/src/operator_cryostat_io/v1beta1/cryostats.rs b/kube-custom-resources-rs/src/operator_cryostat_io/v1beta1/cryostats.rs index c249e6f44..912e2316c 100644 --- a/kube-custom-resources-rs/src/operator_cryostat_io/v1beta1/cryostats.rs +++ b/kube-custom-resources-rs/src/operator_cryostat_io/v1beta1/cryostats.rs @@ -2453,9 +2453,21 @@ pub struct CryostatStorageOptionsPvcSpecSelectorMatchExpressions { /// Options to configure the Cryostat application's target discovery mechanisms. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct CryostatTargetDiscoveryOptions { - /// When true, the Cryostat application will disable the built-in discovery mechanisms. Defaults to false + /// When true, the Cryostat application will disable the built-in discovery mechanisms. Defaults to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "builtInDiscoveryDisabled")] pub built_in_discovery_disabled: Option, + /// When true, the Cryostat application will use the default port name jfr-jmx to look for JMX connectable targets. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableBuiltInPortNames")] + pub disable_built_in_port_names: Option, + /// When true, the Cryostat application will use the default port number 9091 to look for JMX connectable targets. 
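Editor's note: the opentelemetrycollectors.rs hunk above adds a per-node variant to the target allocator's AllocationStrategy enum and updates its doc comment. A small, hedged round-trip check against a local mirror of that enum (only serde and serde_json assumed):

    use serde::{Deserialize, Serialize};

    // Local mirror of OpenTelemetryCollectorTargetAllocatorAllocationStrategy
    // as extended by this patch.
    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    pub enum AllocationStrategy {
        #[serde(rename = "least-weighted")]
        LeastWeighted,
        #[serde(rename = "consistent-hashing")]
        ConsistentHashing,
        #[serde(rename = "per-node")]
        PerNode,
    }

    fn main() {
        // The new wire value "per-node" maps onto the PerNode variant and back.
        let parsed: AllocationStrategy = serde_json::from_str("\"per-node\"").unwrap();
        assert_eq!(parsed, AllocationStrategy::PerNode);
        assert_eq!(serde_json::to_string(&parsed).unwrap(), "\"per-node\"");
    }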
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableBuiltInPortNumbers")] + pub disable_built_in_port_numbers: Option, + /// List of port names that the Cryostat application should look for in order to consider a target as JMX connectable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "discoveryPortNames")] + pub discovery_port_names: Option>, + /// List of port numbers that the Cryostat application should look for in order to consider a target as JMX connectable. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "discoveryPortNumbers")] + pub discovery_port_numbers: Option>, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] diff --git a/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourconfigurations.rs b/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourconfigurations.rs index 3ae48248a..31876f079 100644 --- a/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourconfigurations.rs +++ b/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourconfigurations.rs @@ -586,28 +586,14 @@ pub struct ContourConfigurationEnvoyTimeouts { /// is configured to serve traffic. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ContourConfigurationGateway { - /// ControllerName is used to determine whether Contour should reconcile a - /// GatewayClass. The string takes the form of "projectcontour.io//contour". - /// If unset, the gatewayclass controller will not be started. - /// Exactly one of ControllerName or GatewayRef must be set. - /// Deprecated: users should use GatewayRef, or the Gateway provisioner, - /// in place of this field. This field will be removed in a future release. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "controllerName")] - pub controller_name: Option, - /// GatewayRef defines a specific Gateway that this Contour - /// instance corresponds to. If set, Contour will reconcile - /// only this gateway, and will not reconcile any gateway - /// classes. - /// Exactly one of ControllerName or GatewayRef must be set. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatewayRef")] - pub gateway_ref: Option, + /// GatewayRef defines the specific Gateway that this Contour + /// instance corresponds to. + #[serde(rename = "gatewayRef")] + pub gateway_ref: ContourConfigurationGatewayGatewayRef, } -/// GatewayRef defines a specific Gateway that this Contour -/// instance corresponds to. If set, Contour will reconcile -/// only this gateway, and will not reconcile any gateway -/// classes. -/// Exactly one of ControllerName or GatewayRef must be set. +/// GatewayRef defines the specific Gateway that this Contour +/// instance corresponds to. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ContourConfigurationGatewayGatewayRef { pub name: String, diff --git a/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourdeployments.rs b/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourdeployments.rs index 150defb36..21d69e453 100644 --- a/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourdeployments.rs +++ b/kube-custom-resources-rs/src/projectcontour_io/v1alpha1/contourdeployments.rs @@ -2710,28 +2710,14 @@ pub struct ContourDeploymentRuntimeSettingsEnvoyTimeouts { /// is configured to serve traffic. 
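Editor's note: the cryostats.rs hunk above introduces four new target-discovery knobs (disableBuiltInPortNames, disableBuiltInPortNumbers, discoveryPortNames, discoveryPortNumbers). A sketch of a fully specified configuration using a simplified mirror of CryostatTargetDiscoveryOptions; the element types of the two port lists are assumptions.

    use serde::{Deserialize, Serialize};

    // Simplified mirror of CryostatTargetDiscoveryOptions; list element types
    // (String / i32) are assumptions.
    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    pub struct TargetDiscoveryOptionsSketch {
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableBuiltInPortNames")]
        pub disable_built_in_port_names: Option<bool>,
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "disableBuiltInPortNumbers")]
        pub disable_built_in_port_numbers: Option<bool>,
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "discoveryPortNames")]
        pub discovery_port_names: Option<Vec<String>>,
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "discoveryPortNumbers")]
        pub discovery_port_numbers: Option<Vec<i32>>,
    }

    fn main() {
        // Turn off the built-in jfr-jmx / 9091 lookups and supply explicit ones.
        let opts = TargetDiscoveryOptionsSketch {
            disable_built_in_port_names: Some(true),
            disable_built_in_port_numbers: Some(true),
            discovery_port_names: Some(vec!["my-jmx".to_string()]),
            discovery_port_numbers: Some(vec![9099]),
        };
        println!("{}", serde_json::to_string_pretty(&opts).unwrap());
    }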
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ContourDeploymentRuntimeSettingsGateway { - /// ControllerName is used to determine whether Contour should reconcile a - /// GatewayClass. The string takes the form of "projectcontour.io//contour". - /// If unset, the gatewayclass controller will not be started. - /// Exactly one of ControllerName or GatewayRef must be set. - /// Deprecated: users should use GatewayRef, or the Gateway provisioner, - /// in place of this field. This field will be removed in a future release. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "controllerName")] - pub controller_name: Option, - /// GatewayRef defines a specific Gateway that this Contour - /// instance corresponds to. If set, Contour will reconcile - /// only this gateway, and will not reconcile any gateway - /// classes. - /// Exactly one of ControllerName or GatewayRef must be set. - #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatewayRef")] - pub gateway_ref: Option, -} - -/// GatewayRef defines a specific Gateway that this Contour -/// instance corresponds to. If set, Contour will reconcile -/// only this gateway, and will not reconcile any gateway -/// classes. -/// Exactly one of ControllerName or GatewayRef must be set. + /// GatewayRef defines the specific Gateway that this Contour + /// instance corresponds to. + #[serde(rename = "gatewayRef")] + pub gateway_ref: ContourDeploymentRuntimeSettingsGatewayGatewayRef, +} + +/// GatewayRef defines the specific Gateway that this Contour +/// instance corresponds to. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ContourDeploymentRuntimeSettingsGatewayGatewayRef { pub name: String, diff --git a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/alertmanagerdefinitions.rs b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/alertmanagerdefinitions.rs index fe0db3cf8..2d6b23e83 100644 --- a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/alertmanagerdefinitions.rs +++ b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/alertmanagerdefinitions.rs @@ -16,22 +16,36 @@ pub struct AlertManagerDefinitionSpec { /// The ID of the workspace in which to create the alert manager definition. 
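Editor's note: in both Contour hunks above, the deprecated controllerName field is dropped and gatewayRef changes from optional to required. A minimal mirror illustrating the consequence, namely that a gateway section without gatewayRef no longer deserializes; only the name field shown in the hunk is mirrored here.

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    pub struct GatewayRefSketch {
        pub name: String,
    }

    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    pub struct GatewaySketch {
        // Required after this patch: no Option, no skip_serializing_if.
        #[serde(rename = "gatewayRef")]
        pub gateway_ref: GatewayRefSketch,
    }

    fn main() {
        let ok: GatewaySketch =
            serde_json::from_str(r#"{"gatewayRef":{"name":"contour"}}"#).unwrap();
        assert_eq!(ok.gateway_ref.name, "contour");
        // With gatewayRef now mandatory, an empty gateway object is rejected.
        assert!(serde_json::from_str::<GatewaySketch>("{}").is_err());
    }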
#[serde(default, skip_serializing_if = "Option::is_none", rename = "workspaceID")] pub workspace_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "workspaceRef")] pub workspace_ref: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AlertManagerDefinitionWorkspaceRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AlertManagerDefinitionWorkspaceRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -41,10 +55,15 @@ pub struct AlertManagerDefinitionWorkspaceRefFrom { /// AlertManagerDefinitionStatus defines the observed state of AlertManagerDefinition #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AlertManagerDefinitionStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// Status code of this definition. 
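Editor's note: the AWSResourceReferenceWrapper doc comment above describes the `from: name: my-api` reference syntax. A self-contained sketch of how that shape maps onto the generated wrapper/from pair, using simplified mirrors of AlertManagerDefinitionWorkspaceRef and AlertManagerDefinitionWorkspaceRefFrom; the name field type is an assumption.

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct WorkspaceRefFromSketch {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub name: Option<String>,
    }

    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct WorkspaceRefSketch {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub from: Option<WorkspaceRefFromSketch>,
    }

    fn main() {
        // Equivalent of `workspaceRef: { from: { name: my-workspace } }` in the CR.
        let r: WorkspaceRefSketch =
            serde_json::from_str(r#"{"from":{"name":"my-workspace"}}"#).unwrap();
        assert_eq!(r.from.unwrap().name.as_deref(), Some("my-workspace"));
    }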
@@ -55,20 +74,32 @@ pub struct AlertManagerDefinitionStatus { pub status_reason: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AlertManagerDefinitionStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AlertManagerDefinitionStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/rulegroupsnamespaces.rs b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/rulegroupsnamespaces.rs index 27182fa15..02e179f9c 100644 --- a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/rulegroupsnamespaces.rs +++ b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/rulegroupsnamespaces.rs @@ -22,22 +22,36 @@ pub struct RuleGroupsNamespaceSpec { /// The ID of the workspace in which to create the rule group namespace. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "workspaceID")] pub workspace_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "workspaceRef")] pub workspace_ref: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RuleGroupsNamespaceWorkspaceRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RuleGroupsNamespaceWorkspaceRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -47,10 +61,15 @@ pub struct RuleGroupsNamespaceWorkspaceRefFrom { /// RuleGroupsNamespaceStatus defines the observed state of RuleGroupsNamespace #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RuleGroupsNamespaceStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The status of rule groups namespace. 
@@ -58,20 +77,32 @@ pub struct RuleGroupsNamespaceStatus { pub status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RuleGroupsNamespaceStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct RuleGroupsNamespaceStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/workspaces.rs b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/workspaces.rs index 962d7b24b..17f365c0a 100644 --- a/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/workspaces.rs +++ b/kube-custom-resources-rs/src/prometheusservice_services_k8s_aws/v1alpha1/workspaces.rs @@ -13,7 +13,8 @@ use std::collections::BTreeMap; #[kube(status = "WorkspaceStatus")] #[kube(schema = "disabled")] pub struct WorkspaceSpec { - /// An optional user-assigned alias for this workspace. This alias is for user reference and does not need to be unique. + /// An optional user-assigned alias for this workspace. This alias is for user + /// reference and does not need to be unique. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub alias: Option, /// Optional, user-provided tags for this workspace. @@ -24,10 +25,15 @@ pub struct WorkspaceSpec { /// WorkspaceStatus defines the observed state of Workspace #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct WorkspaceStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The status of the workspace that was just created (usually CREATING). @@ -38,20 +44,32 @@ pub struct WorkspaceStatus { pub workspace_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct WorkspaceStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. 
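Editor's note: for the Amazon Managed Prometheus WorkspaceSpec above, alias is optional and explicitly allowed to be non-unique, and tags are an optional map. A small sketch with assumed payload types (String alias, BTreeMap<String, String> tags):

    use serde::{Deserialize, Serialize};
    use std::collections::BTreeMap;

    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct WorkspaceSpecSketch {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub alias: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub tags: Option<BTreeMap<String, String>>,
    }

    fn main() {
        let spec = WorkspaceSpecSketch {
            alias: Some("team-metrics".to_string()),
            tags: Some(BTreeMap::from([("env".to_string(), "prod".to_string())])),
        };
        // Prints {"alias":"team-metrics","tags":{"env":"prod"}}.
        println!("{}", serde_json::to_string(&spec).unwrap());
    }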
#[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct WorkspaceStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/quay_redhat_com/v1/quayregistries.rs b/kube-custom-resources-rs/src/quay_redhat_com/v1/quayregistries.rs index 1d67c4827..48f3d1be8 100644 --- a/kube-custom-resources-rs/src/quay_redhat_com/v1/quayregistries.rs +++ b/kube-custom-resources-rs/src/quay_redhat_com/v1/quayregistries.rs @@ -48,6 +48,9 @@ pub struct QuayRegistryComponentsOverrides { pub labels: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub replicas: Option, + /// Resources describes the resource limits and requests for a component. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub resources: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeSize")] pub volume_size: Option, } @@ -538,6 +541,17 @@ pub struct QuayRegistryComponentsOverridesEnvValueFromSecretKeyRef { pub optional: Option, } +/// Resources describes the resource limits and requests for a component. +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] +pub struct QuayRegistryComponentsOverridesResources { + /// ResourceList is a set of (resource name, quantity) pairs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub limits: Option>, + /// ResourceList is a set of (resource name, quantity) pairs. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests: Option>, +} + /// QuayRegistryStatus defines the observed state of QuayRegistry. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct QuayRegistryStatus { diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusterparametergroups.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusterparametergroups.rs index 7555c6162..2967678c1 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusterparametergroups.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusterparametergroups.rs @@ -6,9 +6,14 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// DBClusterParameterGroupSpec defines the desired state of DBClusterParameterGroup. -/// Contains the details of an Amazon RDS DB cluster parameter group. -/// This data type is used as a response element in the DescribeDBClusterParameterGroups action. +/// DBClusterParameterGroupSpec defines the desired state of DBClusterParameterGroup. +/// +/// +/// Contains the details of an Amazon RDS DB cluster parameter group. +/// +/// +/// This data type is used as a response element in the DescribeDBClusterParameterGroups +/// action. 
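Editor's note: the quayregistries.rs hunk above adds per-component resources overrides, with limits and requests described as ResourceList maps of (resource name, quantity) pairs. A hedged sketch using a simplified mirror; the quantity values are assumed to serialize as plain strings such as "500m" or "2Gi".

    use serde::{Deserialize, Serialize};
    use std::collections::BTreeMap;

    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct ResourcesOverrideSketch {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub limits: Option<BTreeMap<String, String>>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub requests: Option<BTreeMap<String, String>>,
    }

    fn main() {
        let res = ResourcesOverrideSketch {
            limits: Some(BTreeMap::from([("memory".to_string(), "2Gi".to_string())])),
            requests: Some(BTreeMap::from([("cpu".to_string(), "500m".to_string())])),
        };
        println!("{}", serde_json::to_string(&res).unwrap());
    }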
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBClusterParameterGroup", plural = "dbclusterparametergroups")] #[kube(namespaced)] @@ -17,38 +22,100 @@ use std::collections::BTreeMap; pub struct DBClusterParameterGroupSpec { /// The description for the DB cluster parameter group. pub description: String, - /// The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family. - /// Aurora MySQL - /// Example: aurora5.6, aurora-mysql5.7, aurora-mysql8.0 - /// Aurora PostgreSQL - /// Example: aurora-postgresql9.6 - /// RDS for MySQL - /// Example: mysql8.0 - /// RDS for PostgreSQL - /// Example: postgres12 - /// To list all of the available parameter group families for a DB engine, use the following command: - /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" --engine - /// For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command: - /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" --engine aurora-postgresql - /// The output contains duplicates. - /// The following are the valid DB engine values: - /// * aurora (for MySQL 5.6-compatible Aurora) - /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) - /// * aurora-postgresql - /// * mysql - /// * postgres + /// The DB cluster parameter group family name. A DB cluster parameter group + /// can be associated with one and only one DB cluster parameter group family, + /// and can be applied only to a DB cluster running a database engine and engine + /// version compatible with that DB cluster parameter group family. + /// + /// + /// Aurora MySQL + /// + /// + /// Example: aurora5.6, aurora-mysql5.7, aurora-mysql8.0 + /// + /// + /// Aurora PostgreSQL + /// + /// + /// Example: aurora-postgresql9.6 + /// + /// + /// RDS for MySQL + /// + /// + /// Example: mysql8.0 + /// + /// + /// RDS for PostgreSQL + /// + /// + /// Example: postgres12 + /// + /// + /// To list all of the available parameter group families for a DB engine, use + /// the following command: + /// + /// + /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" + /// --engine + /// + /// + /// For example, to list all of the available parameter group families for the + /// Aurora PostgreSQL DB engine, use the following command: + /// + /// + /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" + /// --engine aurora-postgresql + /// + /// + /// The output contains duplicates. + /// + /// + /// The following are the valid DB engine values: + /// + /// + /// * aurora (for MySQL 5.6-compatible Aurora) + /// + /// + /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) + /// + /// + /// * aurora-postgresql + /// + /// + /// * mysql + /// + /// + /// * postgres pub family: String, - /// The name of the DB cluster parameter group. - /// Constraints: - /// * Must not match the name of an existing DB cluster parameter group. - /// This value is stored as a lowercase string. + /// The name of the DB cluster parameter group. 
+ /// + /// + /// Constraints: + /// + /// + /// * Must not match the name of an existing DB cluster parameter group. + /// + /// + /// This value is stored as a lowercase string. pub name: String, #[serde(default, skip_serializing_if = "Option::is_none", rename = "parameterOverrides")] pub parameter_overrides: Option>, - /// A list of parameters in the DB cluster parameter group to modify. - /// Valid Values (for the application method): immediate | pending-reboot - /// You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters. - /// When the application method is immediate, changes to dynamic parameters are applied immediately to the DB clusters associated with the parameter group. When the application method is pending-reboot, changes to dynamic and static parameters are applied after a reboot without failover to the DB clusters associated with the parameter group. + /// A list of parameters in the DB cluster parameter group to modify. + /// + /// + /// Valid Values (for the application method): immediate | pending-reboot + /// + /// + /// You can use the immediate value with dynamic parameters only. You can use + /// the pending-reboot value for both dynamic and static parameters. + /// + /// + /// When the application method is immediate, changes to dynamic parameters are + /// applied immediately to the DB clusters associated with the parameter group. + /// When the application method is pending-reboot, changes to dynamic and static + /// parameters are applied after a reboot without failover to the DB clusters + /// associated with the parameter group. #[serde(default, skip_serializing_if = "Option::is_none")] pub parameters: Option>, /// Tags to assign to the DB cluster parameter group. @@ -56,8 +123,12 @@ pub struct DBClusterParameterGroupSpec { pub tags: Option>, } -/// This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. -/// This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions. +/// This data type is used as a request parameter in the ModifyDBParameterGroup +/// and ResetDBParameterGroup actions. +/// +/// +/// This data type is used as a response element in the DescribeEngineDefaultParameters +/// and DescribeDBParameters actions. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupParameters { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedValues")] @@ -84,8 +155,11 @@ pub struct DBClusterParameterGroupParameters { pub supported_engine_modes: Option>, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. 
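Editor's note: pulling the DBClusterParameterGroupSpec requirements above together (required description, family and name; the name stored as a lowercase string; optional parameterOverrides), here is a trimmed-down sketch with assumed field types, using the aurora-postgresql9.6 family named in the doc comment as the example value.

    use serde::{Deserialize, Serialize};
    use std::collections::BTreeMap;

    #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
    pub struct DbClusterParameterGroupSpecSketch {
        pub description: String,
        pub family: String,
        pub name: String,
        // Assumed to be a flat string-to-string map of parameter overrides.
        #[serde(default, skip_serializing_if = "Option::is_none", rename = "parameterOverrides")]
        pub parameter_overrides: Option<BTreeMap<String, String>>,
    }

    fn main() {
        let mut overrides = BTreeMap::new();
        overrides.insert("log_statement".to_string(), "all".to_string());

        let spec = DbClusterParameterGroupSpecSketch {
            description: "Cluster parameters for the analytics DB".to_string(),
            family: "aurora-postgresql9.6".to_string(),
            name: "analytics-cluster-params".to_string(),
            parameter_overrides: Some(overrides),
        };
        println!("{}", serde_json::to_string_pretty(&spec).unwrap());
    }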
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -97,10 +171,15 @@ pub struct DBClusterParameterGroupTags { /// DBClusterParameterGroupStatus defines the observed state of DBClusterParameterGroup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// Provides a list of parameters for the DB cluster parameter group. @@ -108,20 +187,32 @@ pub struct DBClusterParameterGroupStatus { pub parameter_override_statuses: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. 
+ /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupStatusConditions { /// Last time the condition transitioned from one status to another. @@ -140,8 +231,12 @@ pub struct DBClusterParameterGroupStatusConditions { pub r#type: String, } -/// This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. -/// This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions. +/// This data type is used as a request parameter in the ModifyDBParameterGroup +/// and ResetDBParameterGroup actions. +/// +/// +/// This data type is used as a response element in the DescribeEngineDefaultParameters +/// and DescribeDBParameters actions. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterParameterGroupStatusParameterOverrideStatuses { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedValues")] diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusters.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusters.rs index e76dbb204..8c0874d86 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusters.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbclusters.rs @@ -5,441 +5,1067 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DBClusterSpec defines the desired state of DBCluster. -/// Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster. -/// For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster. -/// For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime. -/// For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) in the Amazon Aurora User Guide. -/// For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the Amazon RDS User Guide. +/// DBClusterSpec defines the desired state of DBCluster. +/// +/// +/// Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster. 
+/// +/// +/// For an Amazon Aurora DB cluster, this data type is used as a response element +/// in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, +/// ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, +/// RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster. +/// +/// +/// For a Multi-AZ DB cluster, this data type is used as a response element in +/// the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, +/// ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime. +/// +/// +/// For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? +/// (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +/// in the Amazon Aurora User Guide. +/// +/// +/// For more information on Multi-AZ DB clusters, see Multi-AZ deployments with +/// two readable standby DB instances (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) +/// in the Amazon RDS User Guide. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBCluster", plural = "dbclusters")] #[kube(namespaced)] #[kube(status = "DBClusterStatus")] #[kube(schema = "disabled")] pub struct DBClusterSpec { - /// The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. - /// This setting is required to create a Multi-AZ DB cluster. - /// Valid for: Multi-AZ DB clusters only + /// The amount of storage in gibibytes (GiB) to allocate to each DB instance + /// in the Multi-AZ DB cluster. + /// + /// + /// This setting is required to create a Multi-AZ DB cluster. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocatedStorage")] pub allocated_storage: Option, - /// A value that indicates whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. - /// Valid for: Multi-AZ DB clusters only + /// A value that indicates whether minor engine upgrades are applied automatically + /// to the DB cluster during the maintenance window. By default, minor engine + /// upgrades are applied automatically. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoMinorVersionUpgrade")] pub auto_minor_version_upgrade: Option, - /// A list of Availability Zones (AZs) where DB instances in the DB cluster can be created. - /// For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters only + /// A list of Availability Zones (AZs) where DB instances in the DB cluster can + /// be created. + /// + /// + /// For information on Amazon Web Services Regions and Availability Zones, see + /// Choosing the Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) + /// in the Amazon Aurora User Guide. 
+ /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZones")] pub availability_zones: Option>, - /// The target backtrack window, in seconds. To disable backtracking, set this value to 0. - /// Default: 0 - /// Constraints: - /// * If specified, this value must be set to a number from 0 to 259,200 (72 hours). - /// Valid for: Aurora MySQL DB clusters only + /// The target backtrack window, in seconds. To disable backtracking, set this + /// value to 0. + /// + /// + /// Default: 0 + /// + /// + /// Constraints: + /// + /// + /// * If specified, this value must be set to a number from 0 to 259,200 (72 + /// hours). + /// + /// + /// Valid for: Aurora MySQL DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "backtrackWindow")] pub backtrack_window: Option, - /// The number of days for which automated backups are retained. - /// Default: 1 - /// Constraints: - /// * Must be a value from 1 to 35 - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The number of days for which automated backups are retained. + /// + /// + /// Default: 1 + /// + /// + /// Constraints: + /// + /// + /// * Must be a value from 1 to 35 + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupRetentionPeriod")] pub backup_retention_period: Option, - /// A value that indicates that the DB cluster should be associated with the specified CharacterSet. - /// Valid for: Aurora DB clusters only + /// A value that indicates that the DB cluster should be associated with the + /// specified CharacterSet. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "characterSetName")] pub character_set_name: Option, - /// A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// A value that indicates whether to copy all tags from the DB cluster to snapshots + /// of the DB cluster. The default is not to copy them. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "copyTagsToSnapshot")] pub copy_tags_to_snapshot: Option, - /// The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The name for your database of up to 64 alphanumeric characters. If you do + /// not provide a name, Amazon RDS doesn't create a database in the DB cluster + /// you are creating. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "databaseName")] pub database_name: Option, - /// The DB cluster identifier. This parameter is stored as a lowercase string. - /// Constraints: - /// * Must contain from 1 to 63 letters, numbers, or hyphens. - /// * First character must be a letter. - /// * Can't end with a hyphen or contain two consecutive hyphens. - /// Example: my-cluster1 - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The DB cluster identifier. This parameter is stored as a lowercase string. 
+ /// + /// + /// Constraints: + /// + /// + /// * Must contain from 1 to 63 letters, numbers, or hyphens. + /// + /// + /// * First character must be a letter. + /// + /// + /// * Can't end with a hyphen or contain two consecutive hyphens. + /// + /// + /// Example: my-cluster1 + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(rename = "dbClusterIdentifier")] pub db_cluster_identifier: String, - /// The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6gd.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - /// For the full list of DB instance classes and availability for your engine, see DB instance class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. - /// This setting is required to create a Multi-AZ DB cluster. - /// Valid for: Multi-AZ DB clusters only + /// The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, + /// for example db.m6gd.xlarge. Not all DB instance classes are available in + /// all Amazon Web Services Regions, or for all database engines. + /// + /// + /// For the full list of DB instance classes and availability for your engine, + /// see DB instance class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting is required to create a Multi-AZ DB cluster. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterInstanceClass")] pub db_cluster_instance_class: Option, - /// The name of the DB cluster parameter group to associate with this DB cluster. If you do not specify a value, then the default DB cluster parameter group for the specified DB engine and version is used. - /// Constraints: - /// * If supplied, must match the name of an existing DB cluster parameter group. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The name of the DB cluster parameter group to associate with this DB cluster. + /// If you do not specify a value, then the default DB cluster parameter group + /// for the specified DB engine and version is used. + /// + /// + /// Constraints: + /// + /// + /// * If supplied, must match the name of an existing DB cluster parameter + /// group. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterParameterGroupName")] pub db_cluster_parameter_group_name: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterParameterGroupRef")] pub db_cluster_parameter_group_ref: Option, - /// A DB subnet group to associate with this DB cluster. - /// This setting is required to create a Multi-AZ DB cluster. - /// Constraints: Must match the name of an existing DBSubnetGroup. Must not be default. 
- /// Example: mydbsubnetgroup - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// A DB subnet group to associate with this DB cluster. + /// + /// + /// This setting is required to create a Multi-AZ DB cluster. + /// + /// + /// Constraints: Must match the name of an existing DBSubnetGroup. Must not be + /// default. + /// + /// + /// Example: mydbsubnetgroup + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroupName")] pub db_subnet_group_name: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroupRef")] pub db_subnet_group_ref: Option, /// Reserved for future use. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSystemID")] pub db_system_id: Option, - /// A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// A value that indicates whether the DB cluster has deletion protection enabled. + /// The database can't be deleted when deletion protection is enabled. By default, + /// deletion protection isn't enabled. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletionProtection")] pub deletion_protection: Option, /// DestinationRegion is used for presigning the request to a given region. #[serde(default, skip_serializing_if = "Option::is_none", rename = "destinationRegion")] pub destination_region: Option, - /// The Active Directory directory ID to create the DB cluster in. - /// For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster. - /// For more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters only + /// The Active Directory directory ID to create the DB cluster in. + /// + /// + /// For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication + /// to authenticate users that connect to the DB cluster. + /// + /// + /// For more information, see Kerberos authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none")] pub domain: Option, - /// Specify the name of the IAM role to be used when making API calls to the Directory Service. - /// Valid for: Aurora DB clusters only + /// Specify the name of the IAM role to be used when making API calls to the + /// Directory Service. 
+ /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainIAMRoleName")] pub domain_iam_role_name: Option, - /// The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. - /// RDS for MySQL - /// Possible values are error, general, and slowquery. - /// RDS for PostgreSQL - /// Possible values are postgresql and upgrade. - /// Aurora MySQL - /// Possible values are audit, error, general, and slowquery. - /// Aurora PostgreSQL - /// Possible value is postgresql. - /// For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon RDS User Guide. - /// For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The list of log types that need to be enabled for exporting to CloudWatch + /// Logs. The values in the list depend on the DB engine being used. + /// + /// + /// RDS for MySQL + /// + /// + /// Possible values are error, general, and slowquery. + /// + /// + /// RDS for PostgreSQL + /// + /// + /// Possible values are postgresql and upgrade. + /// + /// + /// Aurora MySQL + /// + /// + /// Possible values are audit, error, general, and slowquery. + /// + /// + /// Aurora PostgreSQL + /// + /// + /// Possible value is postgresql. + /// + /// + /// For more information about exporting CloudWatch Logs for Amazon RDS, see + /// Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + /// in the Amazon RDS User Guide. + /// + /// + /// For more information about exporting CloudWatch Logs for Amazon Aurora, see + /// Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableCloudwatchLogsExports")] pub enable_cloudwatch_logs_exports: Option>, - /// A value that indicates whether to enable this DB cluster to forward write operations to the primary cluster of an Aurora global database (GlobalCluster). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database. - /// You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by the FailoverGlobalCluster API operation, but it does nothing until then. 
- /// Valid for: Aurora DB clusters only + /// A value that indicates whether to enable this DB cluster to forward write + /// operations to the primary cluster of an Aurora global database (GlobalCluster). + /// By default, write operations are not allowed on Aurora DB clusters that are + /// secondary clusters in an Aurora global database. + /// + /// + /// You can set this value only on Aurora DB clusters that are members of an + /// Aurora global database. With this parameter enabled, a secondary cluster + /// can forward writes to the current primary cluster and the resulting changes + /// are replicated back to this cluster. For the primary DB cluster of an Aurora + /// global database, this value is used immediately if the primary is demoted + /// by the FailoverGlobalCluster API operation, but it does nothing until then. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableGlobalWriteForwarding")] pub enable_global_write_forwarding: Option, - /// A value that indicates whether to enable the HTTP endpoint for an Aurora Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. - /// When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor. - /// For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters only + /// A value that indicates whether to enable the HTTP endpoint for an Aurora + /// Serverless v1 DB cluster. By default, the HTTP endpoint is disabled. + /// + /// + /// When enabled, the HTTP endpoint provides a connectionless web service API + /// for running SQL queries on the Aurora Serverless v1 DB cluster. You can also + /// query your database from inside the RDS console with the query editor. + /// + /// + /// For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableHTTPEndpoint")] pub enable_http_endpoint: Option, - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. - /// For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters only + /// A value that indicates whether to enable mapping of Amazon Web Services Identity + /// and Access Management (IAM) accounts to database accounts. By default, mapping + /// isn't enabled. + /// + /// + /// For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableIAMDatabaseAuthentication")] pub enable_iam_database_authentication: Option, - /// A value that indicates whether to turn on Performance Insights for the DB cluster. 
- /// For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the Amazon RDS User Guide. - /// Valid for: Multi-AZ DB clusters only + /// A value that indicates whether to turn on Performance Insights for the DB + /// cluster. + /// + /// + /// For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) + /// in the Amazon RDS User Guide. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "enablePerformanceInsights")] pub enable_performance_insights: Option, - /// The name of the database engine to be used for this DB cluster. - /// Valid Values: - /// * aurora-mysql - /// * aurora-postgresql - /// * mysql - /// * postgres - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The name of the database engine to be used for this DB cluster. + /// + /// + /// Valid Values: + /// + /// + /// * aurora-mysql + /// + /// + /// * aurora-postgresql + /// + /// + /// * mysql + /// + /// + /// * postgres + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters pub engine: String, - /// The DB engine mode of the DB cluster, either provisioned or serverless. - /// The serverless engine mode only applies for Aurora Serverless v1 DB clusters. - /// Limitations and requirements apply to some DB engine modes. For more information, see the following sections in the Amazon Aurora User Guide: - /// * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) - /// * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) - /// * Limitations of parallel query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) - /// * Limitations of Aurora global databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) - /// Valid for: Aurora DB clusters only + /// The DB engine mode of the DB cluster, either provisioned or serverless. + /// + /// + /// The serverless engine mode only applies for Aurora Serverless v1 DB clusters. + /// + /// + /// Limitations and requirements apply to some DB engine modes. For more information, + /// see the following sections in the Amazon Aurora User Guide: + /// + /// + /// * Limitations of Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) + /// + /// + /// * Requirements for Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) + /// + /// + /// * Limitations of parallel query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) + /// + /// + /// * Limitations of Aurora global databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "engineMode")] pub engine_mode: Option, - /// The version number of the database engine to use. 
- /// To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command: - /// aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" - /// You can supply either 5.7 or 8.0 to use the default engine version for Aurora MySQL version 2 or version 3, respectively. - /// To list all of the available engine versions for Aurora PostgreSQL, use the following command: - /// aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" - /// To list all of the available engine versions for RDS for MySQL, use the following command: - /// aws rds describe-db-engine-versions --engine mysql --query "DBEngineVersions[].EngineVersion" - /// To list all of the available engine versions for RDS for PostgreSQL, use the following command: - /// aws rds describe-db-engine-versions --engine postgres --query "DBEngineVersions[].EngineVersion" - /// Aurora MySQL - /// For information, see Database engine updates for Amazon Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the Amazon Aurora User Guide. - /// Aurora PostgreSQL - /// For information, see Amazon Aurora PostgreSQL releases and engine versions (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the Amazon Aurora User Guide. - /// MySQL - /// For information, see Amazon RDS for MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the Amazon RDS User Guide. - /// PostgreSQL - /// For information, see Amazon RDS for PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the Amazon RDS User Guide. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The version number of the database engine to use. + /// + /// + /// To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) + /// and version 3 (MySQL 8.0-compatible), use the following command: + /// + /// + /// aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + /// + /// + /// You can supply either 5.7 or 8.0 to use the default engine version for Aurora + /// MySQL version 2 or version 3, respectively. + /// + /// + /// To list all of the available engine versions for Aurora PostgreSQL, use the + /// following command: + /// + /// + /// aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + /// + /// + /// To list all of the available engine versions for RDS for MySQL, use the following + /// command: + /// + /// + /// aws rds describe-db-engine-versions --engine mysql --query "DBEngineVersions[].EngineVersion" + /// + /// + /// To list all of the available engine versions for RDS for PostgreSQL, use + /// the following command: + /// + /// + /// aws rds describe-db-engine-versions --engine postgres --query "DBEngineVersions[].EngineVersion" + /// + /// + /// Aurora MySQL + /// + /// + /// For information, see Database engine updates for Amazon Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) + /// in the Amazon Aurora User Guide. 
+ /// + /// + /// Aurora PostgreSQL + /// + /// + /// For information, see Amazon Aurora PostgreSQL releases and engine versions + /// (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// MySQL + /// + /// + /// For information, see Amazon RDS for MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) + /// in the Amazon RDS User Guide. + /// + /// + /// PostgreSQL + /// + /// + /// For information, see Amazon RDS for PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) + /// in the Amazon RDS User Guide. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "engineVersion")] pub engine_version: Option, - /// The global cluster ID of an Aurora cluster that becomes the primary cluster in the new global database cluster. - /// Valid for: Aurora DB clusters only + /// The global cluster ID of an Aurora cluster that becomes the primary cluster + /// in the new global database cluster. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalClusterIdentifier")] pub global_cluster_identifier: Option, - /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. - /// For information about valid IOPS values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the Amazon RDS User Guide. - /// This setting is required to create a Multi-AZ DB cluster. - /// Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster. - /// Valid for: Multi-AZ DB clusters only + /// The amount of Provisioned IOPS (input/output operations per second) to be + /// initially allocated for each DB instance in the Multi-AZ DB cluster. + /// + /// + /// For information about valid IOPS values, see Provisioned IOPS storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting is required to create a Multi-AZ DB cluster. + /// + /// + /// Constraints: Must be a multiple between .5 and 50 of the storage amount for + /// the DB cluster. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none")] pub iops: Option, - /// The Amazon Web Services KMS key identifier for an encrypted DB cluster. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. - /// When a KMS key isn't specified in KmsKeyId: - /// * If ReplicationSourceIdentifier identifies an encrypted source, then Amazon RDS will use the KMS key used to encrypt the source. Otherwise, Amazon RDS will use your default KMS key. - /// * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't specified, then Amazon RDS will use your default KMS key. - /// There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
- /// If you create a read replica of an encrypted DB cluster in another Amazon Web Services Region, you must set KmsKeyId to a KMS key identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The Amazon Web Services KMS key identifier for an encrypted DB cluster. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. To use a KMS key in a different Amazon + /// Web Services account, specify the key ARN or alias ARN. + /// + /// + /// When a KMS key isn't specified in KmsKeyId: + /// + /// + /// * If ReplicationSourceIdentifier identifies an encrypted source, then + /// Amazon RDS will use the KMS key used to encrypt the source. Otherwise, + /// Amazon RDS will use your default KMS key. + /// + /// + /// * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier + /// isn't specified, then Amazon RDS will use your default KMS key. + /// + /// + /// There is a default KMS key for your Amazon Web Services account. Your Amazon + /// Web Services account has a different default KMS key for each Amazon Web + /// Services Region. + /// + /// + /// If you create a read replica of an encrypted DB cluster in another Amazon + /// Web Services Region, you must set KmsKeyId to a KMS key identifier that is + /// valid in the destination Amazon Web Services Region. This KMS key is used + /// to encrypt the read replica in that Amazon Web Services Region. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyRef")] pub kms_key_ref: Option, - /// A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. - /// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide. - /// Constraints: - /// * Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// A value that indicates whether to manage the master user password with Amazon + /// Web Services Secrets Manager. 
+ /// + /// + /// For more information, see Password management with Amazon Web Services Secrets + /// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + /// in the Amazon RDS User Guide and Password management with Amazon Web Services + /// Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// Constraints: + /// + /// + /// * Can't manage the master user password with Amazon Web Services Secrets + /// Manager if MasterUserPassword is specified. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "manageMasterUserPassword")] pub manage_master_user_password: Option, - /// The password for the master database user. This password can contain any printable ASCII character except "/", """, or "@". - /// Constraints: - /// * Must contain from 8 to 41 characters. - /// * Can't be specified if ManageMasterUserPassword is turned on. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The password for the master database user. This password can contain any + /// printable ASCII character except "/", """, or "@". + /// + /// + /// Constraints: + /// + /// + /// * Must contain from 8 to 41 characters. + /// + /// + /// * Can't be specified if ManageMasterUserPassword is turned on. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserPassword")] pub master_user_password: Option, - /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. - /// This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. - /// If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. - /// There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically + /// generated and managed in Amazon Web Services Secrets Manager. + /// + /// + /// This setting is valid only if the master user password is managed by RDS + /// in Amazon Web Services Secrets Manager for the DB cluster. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. To use a KMS key in a different Amazon + /// Web Services account, specify the key ARN or alias ARN. + /// + /// + /// If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager + /// KMS key is used to encrypt the secret. If the secret is in a different Amazon + /// Web Services account, then you can't use the aws/secretsmanager KMS key to + /// encrypt the secret, and you must use a customer managed KMS key. 
+ /// + /// + /// There is a default KMS key for your Amazon Web Services account. Your Amazon + /// Web Services account has a different default KMS key for each Amazon Web + /// Services Region. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecretKMSKeyID")] pub master_user_secret_kms_key_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecretKMSKeyRef")] pub master_user_secret_kms_key_ref: Option, - /// The name of the master user for the DB cluster. - /// Constraints: - /// * Must be 1 to 16 letters or numbers. - /// * First character must be a letter. - /// * Can't be a reserved word for the chosen database engine. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The name of the master user for the DB cluster. + /// + /// + /// Constraints: + /// + /// + /// * Must be 1 to 16 letters or numbers. + /// + /// + /// * First character must be a letter. + /// + /// + /// * Can't be a reserved word for the chosen database engine. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUsername")] pub master_username: Option, - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0. - /// If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. - /// Valid Values: 0, 1, 5, 10, 15, 30, 60 - /// Valid for: Multi-AZ DB clusters only + /// The interval, in seconds, between points when Enhanced Monitoring metrics + /// are collected for the DB cluster. To turn off collecting Enhanced Monitoring + /// metrics, specify 0. The default is 0. + /// + /// + /// If MonitoringRoleArn is specified, also set MonitoringInterval to a value + /// other than 0. + /// + /// + /// Valid Values: 0, 1, 5, 10, 15, 30, 60 + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "monitoringInterval")] pub monitoring_interval: Option, - /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting up and enabling Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. - /// If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value. - /// Valid for: Multi-AZ DB clusters only + /// The Amazon Resource Name (ARN) for the IAM role that permits RDS to send + /// Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. 
+ /// For information on creating a monitoring role, see Setting up and enabling + /// Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) + /// in the Amazon RDS User Guide. + /// + /// + /// If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn + /// value. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "monitoringRoleARN")] pub monitoring_role_arn: Option, - /// The network type of the DB cluster. - /// Valid values: - /// * IPV4 - /// * DUAL - /// The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon Aurora User Guide. - /// Valid for: Aurora DB clusters only + /// The network type of the DB cluster. + /// + /// + /// Valid values: + /// + /// + /// * IPV4 + /// + /// + /// * DUAL + /// + /// + /// The network type is determined by the DBSubnetGroup specified for the DB + /// cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and + /// the IPv6 protocols (DUAL). + /// + /// + /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] pub network_type: Option, - /// A value that indicates that the DB cluster should be associated with the specified option group. - /// DB clusters are associated with a default option group that can't be modified. + /// A value that indicates that the DB cluster should be associated with the + /// specified option group. + /// + /// + /// DB clusters are associated with a default option group that can't be modified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "optionGroupName")] pub option_group_name: Option, - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. - /// If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. - /// Valid for: Multi-AZ DB clusters only + /// The Amazon Web Services KMS key identifier for encryption of Performance + /// Insights data. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. + /// + /// + /// If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon + /// RDS uses your default KMS key. There is a default KMS key for your Amazon + /// Web Services account. Your Amazon Web Services account has a different default + /// KMS key for each Amazon Web Services Region. 
+ /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsKMSKeyID")] pub performance_insights_kms_key_id: Option, - /// The number of days to retain Performance Insights data. The default is 7 days. The following values are valid: - /// * 7 - /// * month * 31, where month is a number of months from 1-23 - /// * 731 - /// For example, the following values are valid: - /// * 93 (3 months * 31) - /// * 341 (11 months * 31) - /// * 589 (19 months * 31) - /// * 731 - /// If you specify a retention period such as 94, which isn't a valid value, RDS issues an error. - /// Valid for: Multi-AZ DB clusters only + /// The number of days to retain Performance Insights data. The default is 7 + /// days. The following values are valid: + /// + /// + /// * 7 + /// + /// + /// * month * 31, where month is a number of months from 1-23 + /// + /// + /// * 731 + /// + /// + /// For example, the following values are valid: + /// + /// + /// * 93 (3 months * 31) + /// + /// + /// * 341 (11 months * 31) + /// + /// + /// * 589 (19 months * 31) + /// + /// + /// * 731 + /// + /// + /// If you specify a retention period such as 94, which isn't a valid value, + /// RDS issues an error. + /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsRetentionPeriod")] pub performance_insights_retention_period: Option, - /// The port number on which the instances in the DB cluster accept connections. - /// RDS for MySQL and Aurora MySQL - /// Default: 3306 - /// Valid values: 1150-65535 - /// RDS for PostgreSQL and Aurora PostgreSQL - /// Default: 5432 - /// Valid values: 1150-65535 - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The port number on which the instances in the DB cluster accept connections. + /// + /// + /// RDS for MySQL and Aurora MySQL + /// + /// + /// Default: 3306 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// RDS for PostgreSQL and Aurora PostgreSQL + /// + /// + /// Default: 5432 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none")] pub port: Option, - /// When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, an URL that contains a Signature Version 4 signed request for the CreateDBCluster operation to be called in the source Amazon Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster. - /// The presigned URL must be a valid request for the CreateDBCluster API operation that can run in the source Amazon Web Services Region that contains the encrypted DB cluster to copy. - /// The presigned URL request must contain the following parameter values: - /// * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. - /// * DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will be created in. - /// * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. 
This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1. - /// To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). - /// If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. - /// Valid for: Aurora DB clusters only + /// When you are replicating a DB cluster from one Amazon Web Services GovCloud + /// (US) Region to another, an URL that contains a Signature Version 4 signed + /// request for the CreateDBCluster operation to be called in the source Amazon + /// Web Services Region where the DB cluster is replicated from. Specify PreSignedUrl + /// only when you are performing cross-Region replication from an encrypted DB + /// cluster. + /// + /// + /// The presigned URL must be a valid request for the CreateDBCluster API operation + /// that can run in the source Amazon Web Services Region that contains the encrypted + /// DB cluster to copy. + /// + /// + /// The presigned URL request must contain the following parameter values: + /// + /// + /// * KmsKeyId - The KMS key identifier for the KMS key to use to encrypt + /// the copy of the DB cluster in the destination Amazon Web Services Region. + /// This should refer to the same KMS key for both the CreateDBCluster operation + /// that is called in the destination Amazon Web Services Region, and the + /// operation contained in the presigned URL. + /// + /// + /// * DestinationRegion - The name of the Amazon Web Services Region that + /// Aurora read replica will be created in. + /// + /// + /// * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted + /// DB cluster to be copied. This identifier must be in the Amazon Resource + /// Name (ARN) format for the source Amazon Web Services Region. For example, + /// if you are copying an encrypted DB cluster from the us-west-2 Amazon Web + /// Services Region, then your ReplicationSourceIdentifier would look like + /// Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1. + /// + /// + /// To learn how to generate a Signature Version 4 signed request, see Authenticating + /// Requests: Using Query Parameters (Amazon Web Services Signature Version 4) + /// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + /// and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + /// + /// + /// If you are using an Amazon Web Services SDK tool or the CLI, you can specify + /// SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl + /// manually. Specifying SourceRegion autogenerates a presigned URL that is a + /// valid request for the operation that can run in the source Amazon Web Services + /// Region. 
+ /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "preSignedURL")] pub pre_signed_url: Option, - /// The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. - /// The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the Amazon Aurora User Guide. - /// Constraints: - /// * Must be in the format hh24:mi-hh24:mi. - /// * Must be in Universal Coordinated Time (UTC). - /// * Must not conflict with the preferred maintenance window. - /// * Must be at least 30 minutes. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The daily time range during which automated backups are created if automated + /// backups are enabled using the BackupRetentionPeriod parameter. + /// + /// + /// The default is a 30-minute window selected at random from an 8-hour block + /// of time for each Amazon Web Services Region. To view the time blocks available, + /// see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) + /// in the Amazon Aurora User Guide. + /// + /// + /// Constraints: + /// + /// + /// * Must be in the format hh24:mi-hh24:mi. + /// + /// + /// * Must be in Universal Coordinated Time (UTC). + /// + /// + /// * Must not conflict with the preferred maintenance window. + /// + /// + /// * Must be at least 30 minutes. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredBackupWindow")] pub preferred_backup_window: Option, - /// The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). - /// Format: ddd:hh24:mi-ddd:hh24:mi - /// The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the Amazon Aurora User Guide. - /// Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - /// Constraints: Minimum 30-minute window. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The weekly time range during which system maintenance can occur, in Universal + /// Coordinated Time (UTC). + /// + /// + /// Format: ddd:hh24:mi-ddd:hh24:mi + /// + /// + /// The default is a 30-minute window selected at random from an 8-hour block + /// of time for each Amazon Web Services Region, occurring on a random day of + /// the week. To see the time blocks available, see Adjusting the Preferred DB + /// Cluster Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) + /// in the Amazon Aurora User Guide. + /// + /// + /// Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + /// + /// + /// Constraints: Minimum 30-minute window. 
+ /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredMaintenanceWindow")] pub preferred_maintenance_window: Option, - /// A value that indicates whether the DB cluster is publicly accessible. - /// When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it. - /// When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address. - /// Default: The default behavior varies depending on whether DBSubnetGroupName is specified. - /// If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: - /// * If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private. - /// * If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public. - /// If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: - /// * If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. - /// * If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public. - /// Valid for: Multi-AZ DB clusters only + /// A value that indicates whether the DB cluster is publicly accessible. + /// + /// + /// When the DB cluster is publicly accessible, its Domain Name System (DNS) + /// endpoint resolves to the private IP address from within the DB cluster's + /// virtual private cloud (VPC). It resolves to the public IP address from outside + /// of the DB cluster's VPC. Access to the DB cluster is ultimately controlled + /// by the security group it uses. That public access isn't permitted if the + /// security group assigned to the DB cluster doesn't permit it. + /// + /// + /// When the DB cluster isn't publicly accessible, it is an internal DB cluster + /// with a DNS name that resolves to a private IP address. + /// + /// + /// Default: The default behavior varies depending on whether DBSubnetGroupName + /// is specified. + /// + /// + /// If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, + /// the following applies: + /// + /// + /// * If the default VPC in the target Region doesn’t have an internet gateway + /// attached to it, the DB cluster is private. + /// + /// + /// * If the default VPC in the target Region has an internet gateway attached + /// to it, the DB cluster is public. + /// + /// + /// If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, + /// the following applies: + /// + /// + /// * If the subnets are part of a VPC that doesn’t have an internet gateway + /// attached to it, the DB cluster is private. + /// + /// + /// * If the subnets are part of a VPC that has an internet gateway attached + /// to it, the DB cluster is public. 
+ /// + /// + /// Valid for: Multi-AZ DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "publiclyAccessible")] pub publicly_accessible: Option, - /// The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The Amazon Resource Name (ARN) of the source DB instance or DB cluster if + /// this DB cluster is created as a read replica. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicationSourceIdentifier")] pub replication_source_identifier: Option, - /// For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. - /// Valid for: Aurora DB clusters only + /// For DB clusters in serverless DB engine mode, the scaling properties of the + /// DB cluster. + /// + /// + /// Valid for: Aurora DB clusters only #[serde(default, skip_serializing_if = "Option::is_none", rename = "scalingConfiguration")] pub scaling_configuration: Option, - /// Contains the scaling configuration of an Aurora Serverless v2 DB cluster. - /// For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) in the Amazon Aurora User Guide. + /// Contains the scaling configuration of an Aurora Serverless v2 DB cluster. + /// + /// + /// For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "serverlessV2ScalingConfiguration")] pub serverless_v2_scaling_configuration: Option, - /// The identifier for the DB snapshot or DB cluster snapshot to restore from. - /// You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot. - /// Constraints: - /// * Must match the identifier of an existing Snapshot. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// The identifier for the DB snapshot or DB cluster snapshot to restore from. + /// + /// + /// You can use either the name or the Amazon Resource Name (ARN) to specify + /// a DB cluster snapshot. However, you can use only the ARN to specify a DB + /// snapshot. + /// + /// + /// Constraints: + /// + /// + /// * Must match the identifier of an existing Snapshot. + /// + /// + /// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[serde(default, skip_serializing_if = "Option::is_none", rename = "snapshotIdentifier")] pub snapshot_identifier: Option, - /// SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN. + /// SourceRegion is the source region where the resource exists. This is not + /// sent over the wire and is only used for presigning. This value should always + /// have the same region as the source ARN. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceRegion")] pub source_region: Option, - /// A value that indicates whether the DB cluster is encrypted. - /// Valid for: Aurora DB clusters and Multi-AZ DB clusters + /// A value that indicates whether the DB cluster is encrypted. 
+    ///
+    ///
+    /// Valid for: Aurora DB clusters and Multi-AZ DB clusters
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageEncrypted")]
     pub storage_encrypted: Option<bool>,
-    /// Specifies the storage type to be associated with the DB cluster.
-    /// This setting is required to create a Multi-AZ DB cluster.
-    /// Valid values: io1
-    /// When specified, a value for the Iops parameter is required.
-    /// Default: io1
-    /// Valid for: Multi-AZ DB clusters only
+    /// Specifies the storage type to be associated with the DB cluster.
+    ///
+    ///
+    /// This setting is required to create a Multi-AZ DB cluster.
+    ///
+    ///
+    /// Valid values: io1
+    ///
+    ///
+    /// When specified, a value for the Iops parameter is required.
+    ///
+    ///
+    /// Default: io1
+    ///
+    ///
+    /// Valid for: Multi-AZ DB clusters only
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageType")]
     pub storage_type: Option<String>,
-    /// Tags to assign to the DB cluster.
-    /// Valid for: Aurora DB clusters and Multi-AZ DB clusters
+    /// Tags to assign to the DB cluster.
+    ///
+    ///
+    /// Valid for: Aurora DB clusters and Multi-AZ DB clusters
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub tags: Option<Vec<DBClusterTags>>,
-    /// A list of EC2 VPC security groups to associate with this DB cluster.
-    /// Valid for: Aurora DB clusters and Multi-AZ DB clusters
+    /// A list of EC2 VPC security groups to associate with this DB cluster.
+    ///
+    ///
+    /// Valid for: Aurora DB clusters and Multi-AZ DB clusters
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSecurityGroupIDs")]
     pub vpc_security_group_i_ds: Option<Vec<String>>,
     #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSecurityGroupRefs")]
     pub vpc_security_group_refs: Option<Vec<DBClusterVpcSecurityGroupRefs>>,
 }
 
-/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef:
-/// from: name: my-api
+/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference
+/// type to provide more user friendly syntax for references using 'from' field
+/// Ex:
+/// APIIDRef:
+///
+///
+/// from:
+/// name: my-api
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterDbClusterParameterGroupRef {
-    /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+    /// AWSResourceReference provides all the values necessary to reference another
+    /// k8s resource for finding the identifier(Id/ARN/Name)
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub from: Option<DBClusterDbClusterParameterGroupRefFrom>,
 }
 
-/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+/// AWSResourceReference provides all the values necessary to reference another
+/// k8s resource for finding the identifier(Id/ARN/Name)
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterDbClusterParameterGroupRefFrom {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
 }
 
-/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef:
-/// from: name: my-api
+/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference
+/// type to provide more user friendly syntax for references using 'from' field
+/// Ex:
+/// APIIDRef:
+///
+///
+/// from:
+/// name: my-api
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterDbSubnetGroupRef {
-    /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+    /// AWSResourceReference provides all the values necessary to reference another
+    /// k8s resource for finding the identifier(Id/ARN/Name)
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub from: Option<DBClusterDbSubnetGroupRefFrom>,
 }
 
-/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+/// AWSResourceReference provides all the values necessary to reference another
+/// k8s resource for finding the identifier(Id/ARN/Name)
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterDbSubnetGroupRefFrom {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
 }
 
-/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef:
-/// from: name: my-api
+/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference
+/// type to provide more user friendly syntax for references using 'from' field
+/// Ex:
+/// APIIDRef:
+///
+///
+/// from:
+/// name: my-api
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterKmsKeyRef {
-    /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+    /// AWSResourceReference provides all the values necessary to reference another
+    /// k8s resource for finding the identifier(Id/ARN/Name)
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub from: Option<DBClusterKmsKeyRefFrom>,
 }
 
-/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name)
+/// AWSResourceReference provides all the values necessary to reference another
+/// k8s resource for finding the identifier(Id/ARN/Name)
 #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
 pub struct DBClusterKmsKeyRefFrom {
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,
 }
 
-/// The password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".
-/// Constraints:
-/// * Must contain from 8 to 41 characters.
-/// * Can't be specified if ManageMasterUserPassword is turned on.
-/// Valid for: Aurora DB clusters and Multi-AZ DB clusters
+/// The password for the master database user. This password can contain any
+/// printable ASCII character except "/", """, or "@".
+///
+///
+/// Constraints:
+///
+///
+/// * Must contain from 8 to 41 characters.
+///
+///
+/// * Can't be specified if ManageMasterUserPassword is turned on.
+/// +/// +/// Valid for: Aurora DB clusters and Multi-AZ DB clusters #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterMasterUserPassword { /// Key is the key within the secret @@ -452,24 +1078,35 @@ pub struct DBClusterMasterUserPassword { pub namespace: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterMasterUserSecretKmsKeyRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterMasterUserSecretKmsKeyRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// For DB clusters in serverless DB engine mode, the scaling properties of the DB cluster. -/// Valid for: Aurora DB clusters only +/// For DB clusters in serverless DB engine mode, the scaling properties of the +/// DB cluster. +/// +/// +/// Valid for: Aurora DB clusters only #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterScalingConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoPause")] @@ -486,8 +1123,11 @@ pub struct DBClusterScalingConfiguration { pub timeout_action: Option, } -/// Contains the scaling configuration of an Aurora Serverless v2 DB cluster. -/// For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) in the Amazon Aurora User Guide. +/// Contains the scaling configuration of an Aurora Serverless v2 DB cluster. +/// +/// +/// For more information, see Using Amazon Aurora Serverless v2 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) +/// in the Amazon Aurora User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterServerlessV2ScalingConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxCapacity")] @@ -496,8 +1136,11 @@ pub struct DBClusterServerlessV2ScalingConfiguration { pub min_capacity: Option, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. 
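// Editorial sketch, not part of the generated patch: building the Aurora Serverless v2
// scaling configuration documented above. The module path and the floating-point
// capacity type (Aurora Capacity Units in 0.5 steps) are assumptions for this example.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbclusters::DBClusterServerlessV2ScalingConfiguration;

fn main() {
    // Scale the cluster between 0.5 and 8 ACUs.
    let scaling = DBClusterServerlessV2ScalingConfiguration {
        min_capacity: Some(0.5),
        max_capacity: Some(8.0),
        ..Default::default()
    };
    // This value would be assigned to the cluster spec's serverless v2 scaling field.
    println!("{:?}", scaling);
}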
+/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -506,16 +1149,24 @@ pub struct DBClusterTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterVpcSecurityGroupRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterVpcSecurityGroupRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -525,23 +1176,35 @@ pub struct DBClusterVpcSecurityGroupRefsFrom { /// DBClusterStatus defines the observed state of DBCluster #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// The Amazon Web Services KMS key identifier used for encrypting messages in the database activity stream. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. + /// The Amazon Web Services KMS key identifier used for encrypting messages in + /// the database activity stream. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamKMSKeyID")] pub activity_stream_kms_key_id: Option, - /// The name of the Amazon Kinesis data stream used for the database activity stream. + /// The name of the Amazon Kinesis data stream used for the database activity + /// stream. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamKinesisStreamName")] pub activity_stream_kinesis_stream_name: Option, - /// The mode of the database activity stream. Database events such as a change or access generate an activity stream event. 
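// Editorial sketch, not part of the generated patch: tagging a DBCluster and referencing
// SecurityGroup resources by name through the wrapper types defined above. The module
// path and the Vec element types of the corresponding spec fields are assumptions here.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbclusters::{
    DBClusterTags, DBClusterVpcSecurityGroupRefs, DBClusterVpcSecurityGroupRefsFrom,
};

fn main() {
    // Key/value tags that would populate the spec's `tags` field.
    let tags = vec![DBClusterTags {
        key: Some("team".to_string()),
        value: Some("data-platform".to_string()),
    }];
    // Reference an ACK SecurityGroup CR by name instead of a raw sg-... ID;
    // raw IDs would go into `vpcSecurityGroupIDs` instead.
    let sg_refs = vec![DBClusterVpcSecurityGroupRefs {
        from: Some(DBClusterVpcSecurityGroupRefsFrom {
            name: Some("my-db-security-group".to_string()), // hypothetical CR name
            ..Default::default()
        }),
    }];
    println!("{:?} {:?}", tags, sg_refs);
}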
The database session can handle these events either synchronously or asynchronously. + /// The mode of the database activity stream. Database events such as a change + /// or access generate an activity stream event. The database session can handle + /// these events either synchronously or asynchronously. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamMode")] pub activity_stream_mode: Option, /// The status of the database activity stream. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamStatus")] pub activity_stream_status: Option, - /// Provides a list of the Amazon Web Services Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf. + /// Provides a list of the Amazon Web Services Identity and Access Management + /// (IAM) roles that are associated with the DB cluster. IAM roles that are associated + /// with a DB cluster grant permission for the DB cluster to access other Amazon + /// Web Services on your behalf. #[serde(default, skip_serializing_if = "Option::is_none", rename = "associatedRoles")] pub associated_roles: Option>, /// The time when a stopped DB cluster is restarted automatically. @@ -550,20 +1213,30 @@ pub struct DBClusterStatus { /// The number of change records stored for Backtrack. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backtrackConsumedChangeRecords")] pub backtrack_consumed_change_records: Option, - /// The current capacity of an Aurora Serverless v1 DB cluster. The capacity is 0 (zero) when the cluster is paused. - /// For more information about Aurora Serverless v1, see Using Amazon Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the Amazon Aurora User Guide. + /// The current capacity of an Aurora Serverless v1 DB cluster. The capacity + /// is 0 (zero) when the cluster is paused. + /// + /// + /// For more information about Aurora Serverless v1, see Using Amazon Aurora + /// Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub capacity: Option, /// Identifies the clone group to which the DB cluster is associated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "cloneGroupID")] pub clone_group_id: Option, - /// Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC). + /// Specifies the time when the DB cluster was created, in Universal Coordinated + /// Time (UTC). #[serde(default, skip_serializing_if = "Option::is_none", rename = "clusterCreateTime")] pub cluster_create_time: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// Specifies whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. 
+ /// Specifies whether the DB cluster is a clone of a DB cluster owned by a different + /// Amazon Web Services account. #[serde(default, skip_serializing_if = "Option::is_none", rename = "crossAccountClone")] pub cross_account_clone: Option, /// Identifies all custom endpoints associated with the cluster. @@ -578,10 +1251,13 @@ pub struct DBClusterStatus { /// Specifies the name of the DB cluster parameter group for the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterParameterGroup")] pub db_cluster_parameter_group: Option, - /// The Amazon Web Services Region-unique, immutable identifier for the DB cluster. This identifier is found in Amazon Web Services CloudTrail log entries whenever the KMS key for the DB cluster is accessed. + /// The Amazon Web Services Region-unique, immutable identifier for the DB cluster. + /// This identifier is found in Amazon Web Services CloudTrail log entries whenever + /// the KMS key for the DB cluster is accessed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterResourceID")] pub db_cluster_resource_id: Option, - /// Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group. + /// Specifies information on the subnet group associated with the DB cluster, + /// including the name, description, and subnets in the subnet group. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroup")] pub db_subnet_group: Option, /// The Active Directory Domain membership records associated with the DB cluster. @@ -590,58 +1266,102 @@ pub struct DBClusterStatus { /// The earliest time to which a DB cluster can be backtracked. #[serde(default, skip_serializing_if = "Option::is_none", rename = "earliestBacktrackTime")] pub earliest_backtrack_time: Option, - /// The earliest time to which a database can be restored with point-in-time restore. + /// The earliest time to which a database can be restored with point-in-time + /// restore. #[serde(default, skip_serializing_if = "Option::is_none", rename = "earliestRestorableTime")] pub earliest_restorable_time: Option, - /// A list of log types that this DB cluster is configured to export to CloudWatch Logs. - /// Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html) in the Amazon Aurora User Guide. + /// A list of log types that this DB cluster is configured to export to CloudWatch + /// Logs. + /// + /// + /// Log types vary by DB engine. For information about the log types for each + /// DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enabledCloudwatchLogsExports")] pub enabled_cloudwatch_logs_exports: Option>, /// Specifies the connection endpoint for the primary instance of the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option, - /// Specifies whether you have requested to enable write forwarding for a secondary cluster in an Aurora global database. Because write forwarding takes time to enable, check the value of GlobalWriteForwardingStatus to confirm that the request has completed before using the write forwarding feature for this cluster. 
+ /// Specifies whether you have requested to enable write forwarding for a secondary + /// cluster in an Aurora global database. Because write forwarding takes time + /// to enable, check the value of GlobalWriteForwardingStatus to confirm that + /// the request has completed before using the write forwarding feature for this + /// cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalWriteForwardingRequested")] pub global_write_forwarding_requested: Option, - /// Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it. + /// Specifies whether a secondary cluster in an Aurora global database has write + /// forwarding enabled, not enabled, or is in the process of enabling it. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalWriteForwardingStatus")] pub global_write_forwarding_status: Option, /// Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostedZoneID")] pub hosted_zone_id: Option, - /// A value that indicates whether the HTTP endpoint for an Aurora Serverless v1 DB cluster is enabled. - /// When enabled, the HTTP endpoint provides a connectionless web service API for running SQL queries on the Aurora Serverless v1 DB cluster. You can also query your database from inside the RDS console with the query editor. - /// For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the Amazon Aurora User Guide. + /// A value that indicates whether the HTTP endpoint for an Aurora Serverless + /// v1 DB cluster is enabled. + /// + /// + /// When enabled, the HTTP endpoint provides a connectionless web service API + /// for running SQL queries on the Aurora Serverless v1 DB cluster. You can also + /// query your database from inside the RDS console with the query editor. + /// + /// + /// For more information, see Using the Data API for Aurora Serverless v1 (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpEndpointEnabled")] pub http_endpoint_enabled: Option, - /// A value that indicates whether the mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled. + /// A value that indicates whether the mapping of Amazon Web Services Identity + /// and Access Management (IAM) accounts to database accounts is enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "iamDatabaseAuthenticationEnabled")] pub iam_database_authentication_enabled: Option, - /// Specifies the latest time to which a database can be restored with point-in-time restore. + /// Specifies the latest time to which a database can be restored with point-in-time + /// restore. #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestRestorableTime")] pub latest_restorable_time: Option, - /// Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. 
- /// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide. + /// Contains the secret managed by RDS in Amazon Web Services Secrets Manager + /// for the master user password. + /// + /// + /// For more information, see Password management with Amazon Web Services Secrets + /// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + /// in the Amazon RDS User Guide and Password management with Amazon Web Services + /// Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecret")] pub master_user_secret: Option, /// Specifies whether the DB cluster has instances in multiple Availability Zones. #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiAZ")] pub multi_az: Option, - /// A value that specifies that changes to the DB cluster are pending. This element is only included when changes are pending. Specific changes are identified by subelements. + /// A value that specifies that changes to the DB cluster are pending. This element + /// is only included when changes are pending. Specific changes are identified + /// by subelements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingModifiedValues")] pub pending_modified_values: Option, /// Specifies the progress of the operation as a percentage. #[serde(default, skip_serializing_if = "Option::is_none", rename = "percentProgress")] pub percent_progress: Option, - /// True if Performance Insights is enabled for the DB cluster, and otherwise false. - /// This setting is only for non-Aurora Multi-AZ DB clusters. + /// True if Performance Insights is enabled for the DB cluster, and otherwise + /// false. + /// + /// + /// This setting is only for non-Aurora Multi-AZ DB clusters. #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsEnabled")] pub performance_insights_enabled: Option, - /// Contains one or more identifiers of the read replicas associated with this DB cluster. + /// Contains one or more identifiers of the read replicas associated with this + /// DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readReplicaIdentifiers")] pub read_replica_identifiers: Option>, - /// The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load-balances connections across the Aurora Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Aurora distributes the connection requests among the Aurora Replicas in the DB cluster. This functionality can help balance your read workload across multiple Aurora Replicas in your DB cluster. - /// If a failover occurs, and the Aurora Replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Aurora Replicas in the cluster, you can then reconnect to the reader endpoint. + /// The reader endpoint for the DB cluster. 
The reader endpoint for a DB cluster + /// load-balances connections across the Aurora Replicas that are available in + /// a DB cluster. As clients request new connections to the reader endpoint, + /// Aurora distributes the connection requests among the Aurora Replicas in the + /// DB cluster. This functionality can help balance your read workload across + /// multiple Aurora Replicas in your DB cluster. + /// + /// + /// If a failover occurs, and the Aurora Replica that you are connected to is + /// promoted to be the primary instance, your connection is dropped. To continue + /// sending your read workload to other Aurora Replicas in the cluster, you can + /// then reconnect to the reader endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readerEndpoint")] pub reader_endpoint: Option, /// Specifies the current state of this DB cluster. @@ -654,20 +1374,31 @@ pub struct DBClusterStatus { pub vpc_security_groups: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Describes an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB cluster. +/// Describes an Amazon Web Services Identity and Access Management (IAM) role +/// that is associated with a DB cluster. 
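// Editorial sketch, not part of the generated patch: inspecting the common ACK status
// members described above on a DBCluster that has already been fetched from the API
// server. The module path and the Option/Vec shapes of the status fields are assumptions.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbclusters::DBCluster;

/// Print the constructed ARN, owning account and region, plus the raw ACK
/// conditions, for an observed DBCluster.
fn report_ack_status(cluster: &DBCluster) {
    if let Some(status) = &cluster.status {
        if let Some(meta) = &status.ack_resource_metadata {
            println!(
                "arn={:?} account={} region={}",
                meta.arn, meta.owner_account_id, meta.region
            );
        }
        // Sync/terminal state is reported through the `ackv1alpha1.Condition` entries.
        println!("conditions: {:?}", status.conditions);
    }
}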
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusAssociatedRoles { #[serde(default, skip_serializing_if = "Option::is_none", rename = "featureName")] @@ -678,7 +1409,9 @@ pub struct DBClusterStatusAssociatedRoles { pub status: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusConditions { /// Last time the condition transitioned from one status to another. @@ -719,7 +1452,8 @@ pub struct DBClusterStatusDbClusterOptionGroupMemberships { pub status: Option, } -/// An Active Directory Domain membership record associated with the DB instance or cluster. +/// An Active Directory Domain membership record associated with the DB instance +/// or cluster. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusDomainMemberships { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -732,8 +1466,15 @@ pub struct DBClusterStatusDomainMemberships { pub status: Option, } -/// Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. -/// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the Amazon Aurora User Guide. +/// Contains the secret managed by RDS in Amazon Web Services Secrets Manager +/// for the master user password. +/// +/// +/// For more information, see Password management with Amazon Web Services Secrets +/// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) +/// in the Amazon RDS User Guide and Password management with Amazon Web Services +/// Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) +/// in the Amazon Aurora User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusMasterUserSecret { #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] @@ -744,7 +1485,9 @@ pub struct DBClusterStatusMasterUserSecret { pub secret_status: Option, } -/// A value that specifies that changes to the DB cluster are pending. This element is only included when changes are pending. Specific changes are identified by subelements. +/// A value that specifies that changes to the DB cluster are pending. This element +/// is only included when changes are pending. Specific changes are identified +/// by subelements. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusPendingModifiedValues { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocatedStorage")] @@ -761,12 +1504,14 @@ pub struct DBClusterStatusPendingModifiedValues { pub iops: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserPassword")] pub master_user_password: Option, - /// A list of the log types whose configuration is still pending. 
In other words, these log types are in the process of being activated or deactivated. + /// A list of the log types whose configuration is still pending. In other words, + /// these log types are in the process of being activated or deactivated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingCloudwatchLogsExports")] pub pending_cloudwatch_logs_exports: Option, } -/// A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated. +/// A list of the log types whose configuration is still pending. In other words, +/// these log types are in the process of being activated or deactivated. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusPendingModifiedValuesPendingCloudwatchLogsExports { #[serde(default, skip_serializing_if = "Option::is_none", rename = "logTypesToDisable")] @@ -775,8 +1520,11 @@ pub struct DBClusterStatusPendingModifiedValuesPendingCloudwatchLogsExports { pub log_types_to_enable: Option>, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusTagList { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -785,7 +1533,8 @@ pub struct DBClusterStatusTagList { pub value: Option, } -/// This data type is used as a response element for queries on VPC security group membership. +/// This data type is used as a response element for queries on VPC security +/// group membership. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBClusterStatusVpcSecurityGroups { #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbinstances.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbinstances.rs index 479c4f517..d1bb1e095 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbinstances.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbinstances.rs @@ -5,646 +5,1729 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DBInstanceSpec defines the desired state of DBInstance. -/// Contains the details of an Amazon RDS DB instance. -/// This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance. +/// DBInstanceSpec defines the desired state of DBInstance. +/// +/// +/// Contains the details of an Amazon RDS DB instance. 
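// Editorial sketch, not part of the generated patch: reading a DBInstance (whose
// #[kube(...)] group/version/kind attributes follow below) through the kube client.
// Assumes kube and tokio as dependencies; namespace and resource name are placeholders.
use kube::{Api, Client};
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbinstances::DBInstance;

#[tokio::main]
async fn main() -> Result<(), kube::Error> {
    // Connects using the local kubeconfig or in-cluster configuration.
    let client = Client::try_default().await?;
    // Namespaced API handle for rds.services.k8s.aws/v1alpha1 DBInstance.
    let api: Api<DBInstance> = Api::namespaced(client, "default");
    let db = api.get("my-db-instance").await?;
    println!("observed status: {:?}", db.status);
    Ok(())
}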
+/// +/// +/// This data type is used as a response element in the operations CreateDBInstance, +/// CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, +/// PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, +/// RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBInstance", plural = "dbinstances")] #[kube(namespaced)] #[kube(status = "DBInstanceStatus")] #[kube(schema = "disabled")] pub struct DBInstanceSpec { - /// The amount of storage in gibibytes (GiB) to allocate for the DB instance. - /// Type: Integer - /// Amazon Aurora - /// Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume. - /// Amazon RDS Custom - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. - /// * Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. - /// MySQL - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - /// * Magnetic storage (standard): Must be an integer from 5 to 3072. - /// MariaDB - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - /// * Magnetic storage (standard): Must be an integer from 5 to 3072. - /// PostgreSQL - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - /// * Magnetic storage (standard): Must be an integer from 5 to 3072. - /// Oracle - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 to 65536. - /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - /// * Magnetic storage (standard): Must be an integer from 10 to 3072. - /// SQL Server - /// Constraints to the amount of storage for each storage type are the following: - /// * General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions: Must be an integer from 20 to 16384. Web and Express editions: Must be an integer from 20 to 16384. - /// * Provisioned IOPS storage (io1): Enterprise and Standard editions: Must be an integer from 100 to 16384. Web and Express editions: Must be an integer from 100 to 16384. - /// * Magnetic storage (standard): Enterprise and Standard editions: Must be an integer from 20 to 1024. Web and Express editions: Must be an integer from 20 to 1024. + /// The amount of storage in gibibytes (GiB) to allocate for the DB instance. + /// + /// + /// Type: Integer + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. 
Aurora cluster volumes automatically grow as the amount of + /// data in your database increases, though you are only charged for the space + /// that you use in an Aurora cluster volume. + /// + /// + /// Amazon RDS Custom + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 40 + /// to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. + /// + /// + /// * Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 + /// for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server. + /// + /// + /// MySQL + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 + /// to 65536. + /// + /// + /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + /// + /// + /// * Magnetic storage (standard): Must be an integer from 5 to 3072. + /// + /// + /// MariaDB + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 + /// to 65536. + /// + /// + /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + /// + /// + /// * Magnetic storage (standard): Must be an integer from 5 to 3072. + /// + /// + /// PostgreSQL + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 + /// to 65536. + /// + /// + /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + /// + /// + /// * Magnetic storage (standard): Must be an integer from 5 to 3072. + /// + /// + /// Oracle + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Must be an integer from 20 + /// to 65536. + /// + /// + /// * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + /// + /// + /// * Magnetic storage (standard): Must be an integer from 10 to 3072. + /// + /// + /// SQL Server + /// + /// + /// Constraints to the amount of storage for each storage type are the following: + /// + /// + /// * General Purpose (SSD) storage (gp2, gp3): Enterprise and Standard editions: + /// Must be an integer from 20 to 16384. Web and Express editions: Must be + /// an integer from 20 to 16384. + /// + /// + /// * Provisioned IOPS storage (io1): Enterprise and Standard editions: Must + /// be an integer from 100 to 16384. Web and Express editions: Must be an + /// integer from 100 to 16384. + /// + /// + /// * Magnetic storage (standard): Enterprise and Standard editions: Must + /// be an integer from 20 to 1024. Web and Express editions: Must be an integer + /// from 20 to 1024. #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocatedStorage")] pub allocated_storage: Option, - /// A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically. - /// If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade to false. + /// A value that indicates whether minor engine upgrades are applied automatically + /// to the DB instance during the maintenance window. 
By default, minor engine + /// upgrades are applied automatically. + /// + /// + /// If you create an RDS Custom DB instance, you must set AutoMinorVersionUpgrade + /// to false. #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoMinorVersionUpgrade")] pub auto_minor_version_upgrade: Option, - /// The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). - /// Amazon Aurora - /// Each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. - /// Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region. - /// Example: us-east-1d - /// Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint. + /// The Availability Zone (AZ) where the database will be created. For information + /// on Amazon Web Services Regions and Availability Zones, see Regions and Availability + /// Zones (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). + /// + /// + /// Amazon Aurora + /// + /// + /// Each Aurora DB cluster hosts copies of its storage in three separate Availability + /// Zones. Specify one of these Availability Zones. Aurora automatically chooses + /// an appropriate Availability Zone if you don't specify one. + /// + /// + /// Default: A random, system-chosen Availability Zone in the endpoint's Amazon + /// Web Services Region. + /// + /// + /// Example: us-east-1d + /// + /// + /// Constraint: The AvailabilityZone parameter can't be specified if the DB instance + /// is a Multi-AZ deployment. The specified Availability Zone must be in the + /// same Amazon Web Services Region as the current endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "availabilityZone")] pub availability_zone: Option, - /// The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups. - /// Amazon Aurora - /// Not applicable. The retention period for automated backups is managed by the DB cluster. - /// Default: 1 - /// Constraints: - /// * Must be a value from 0 to 35 - /// * Can't be set to 0 if the DB instance is a source to read replicas - /// * Can't be set to 0 for an RDS Custom for Oracle DB instance + /// The number of days for which automated backups are retained. Setting this + /// parameter to a positive number enables backups. Setting this parameter to + /// 0 disables automated backups. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The retention period for automated backups is managed by + /// the DB cluster. 
+ /// + /// + /// Default: 1 + /// + /// + /// Constraints: + /// + /// + /// * Must be a value from 0 to 35 + /// + /// + /// * Can't be set to 0 if the DB instance is a source to read replicas + /// + /// + /// * Can't be set to 0 for an RDS Custom for Oracle DB instance #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupRetentionPeriod")] pub backup_retention_period: Option, - /// Specifies where automated backups and manual snapshots are stored. - /// Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. - /// For more information, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. + /// Specifies where automated backups and manual snapshots are stored. + /// + /// + /// Possible values are outposts (Amazon Web Services Outposts) and region (Amazon + /// Web Services Region). The default is region. + /// + /// + /// For more information, see Working with Amazon RDS on Amazon Web Services + /// Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "backupTarget")] pub backup_target: Option, - /// Specifies the CA certificate identifier to use for the DB instance’s server certificate. - /// This setting doesn't apply to RDS Custom. - /// For more information, see Using SSL/TLS to encrypt a connection to a DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the Amazon Aurora User Guide. + /// Specifies the CA certificate identifier to use for the DB instance’s server + /// certificate. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// For more information, see Using SSL/TLS to encrypt a connection to a DB instance + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) + /// in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to + /// a DB cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) + /// in the Amazon Aurora User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "caCertificateIdentifier")] pub ca_certificate_identifier: Option, - /// For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet. - /// This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database itself. - /// Amazon Aurora - /// Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster. + /// For supported engines, this value indicates that the DB instance should be + /// associated with the specified CharacterSet. + /// + /// + /// This setting doesn't apply to RDS Custom. However, if you need to change + /// the character set, you can change it on the database itself. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The character set is managed by the DB cluster. For more + /// information, see CreateDBCluster. 
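// Editorial sketch, not part of the generated patch: the BackupRetentionPeriod
// constraints documented above (0-35 days; 0 not allowed when the instance is a
// read-replica source or an RDS Custom for Oracle instance) as a small pre-flight
// check. Purely illustrative; the controller and the AWS API remain authoritative.
fn validate_backup_retention(
    days: i64,
    is_replica_source: bool,
    is_rds_custom_oracle: bool,
) -> Result<(), String> {
    if !(0..=35).contains(&days) {
        return Err(format!("backupRetentionPeriod must be 0-35, got {days}"));
    }
    if days == 0 && (is_replica_source || is_rds_custom_oracle) {
        return Err("backupRetentionPeriod can't be 0 for this instance".to_string());
    }
    Ok(())
}

fn main() {
    assert!(validate_backup_retention(7, false, false).is_ok());
    assert!(validate_backup_retention(0, true, false).is_err());
}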
#[serde(default, skip_serializing_if = "Option::is_none", rename = "characterSetName")] pub character_set_name: Option, - /// A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied. - /// Amazon Aurora - /// Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting. + /// A value that indicates whether to copy tags from the DB instance to snapshots + /// of the DB instance. By default, tags are not copied. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting + /// this value for an Aurora DB instance has no effect on the DB cluster setting. #[serde(default, skip_serializing_if = "Option::is_none", rename = "copyTagsToSnapshot")] pub copy_tags_to_snapshot: Option, - /// The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: - /// * The profile must exist in your account. - /// * The profile must have an IAM role that Amazon EC2 has permissions to assume. - /// * The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. - /// For the list of permissions required for the IAM role, see Configure IAM and your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the Amazon RDS User Guide. - /// This setting is required for RDS Custom. + /// The instance profile associated with the underlying Amazon EC2 instance of + /// an RDS Custom DB instance. The instance profile must meet the following requirements: + /// + /// + /// * The profile must exist in your account. + /// + /// + /// * The profile must have an IAM role that Amazon EC2 has permissions to + /// assume. + /// + /// + /// * The instance profile name and the associated IAM role name must start + /// with the prefix AWSRDSCustom. + /// + /// + /// For the list of permissions required for the IAM role, see Configure IAM + /// and your VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting is required for RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customIAMInstanceProfile")] pub custom_iam_instance_profile: Option, - /// The identifier of the DB cluster that the instance will belong to. - /// This setting doesn't apply to RDS Custom. + /// The identifier of the DB cluster that the instance will belong to. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterIdentifier")] pub db_cluster_identifier: Option, - /// The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from. - /// For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the Amazon RDS User Guide. - /// Constraints: - /// * Must match the identifier of an existing Multi-AZ DB cluster snapshot. - /// * Can't be specified when DBSnapshotIdentifier is specified. - /// * Must be specified when DBSnapshotIdentifier isn't specified. 
- /// * If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot. - /// * Can't be the identifier of an Aurora DB cluster snapshot. - /// * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot. + /// The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore + /// from. + /// + /// + /// For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) + /// in the Amazon RDS User Guide. + /// + /// + /// Constraints: + /// + /// + /// * Must match the identifier of an existing Multi-AZ DB cluster snapshot. + /// + /// + /// * Can't be specified when DBSnapshotIdentifier is specified. + /// + /// + /// * Must be specified when DBSnapshotIdentifier isn't specified. + /// + /// + /// * If you are restoring from a shared manual Multi-AZ DB cluster snapshot, + /// the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot. + /// + /// + /// * Can't be the identifier of an Aurora DB cluster snapshot. + /// + /// + /// * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster + /// snapshot. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterSnapshotIdentifier")] pub db_cluster_snapshot_identifier: Option, - /// The compute and memory capacity of the DB instance, for example db.m5.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the Amazon Aurora User Guide. + /// The compute and memory capacity of the DB instance, for example db.m5.large. + /// Not all DB instance classes are available in all Amazon Web Services Regions, + /// or for all database engines. For the full list of DB instance classes, and + /// availability for your engine, see DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) + /// in the Amazon RDS User Guide or Aurora DB instance classes (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) + /// in the Amazon Aurora User Guide. #[serde(rename = "dbInstanceClass")] pub db_instance_class: String, - /// The DB instance identifier. This parameter is stored as a lowercase string. - /// Constraints: - /// * Must contain from 1 to 63 letters, numbers, or hyphens. - /// * First character must be a letter. - /// * Can't end with a hyphen or contain two consecutive hyphens. - /// Example: mydbinstance + /// The DB instance identifier. This parameter is stored as a lowercase string. + /// + /// + /// Constraints: + /// + /// + /// * Must contain from 1 to 63 letters, numbers, or hyphens. + /// + /// + /// * First character must be a letter. + /// + /// + /// * Can't end with a hyphen or contain two consecutive hyphens. + /// + /// + /// Example: mydbinstance #[serde(rename = "dbInstanceIdentifier")] pub db_instance_identifier: String, - /// The meaning of this parameter differs according to the database engine you use. - /// MySQL - /// The name of the database to create when the DB instance is created. 
If this parameter isn't specified, no database is created in the DB instance. - /// Constraints: - /// * Must contain 1 to 64 letters or numbers. - /// * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). - /// * Can't be a word reserved by the specified database engine - /// MariaDB - /// The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. - /// Constraints: - /// * Must contain 1 to 64 letters or numbers. - /// * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). - /// * Can't be a word reserved by the specified database engine - /// PostgreSQL - /// The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance. - /// Constraints: - /// * Must contain 1 to 63 letters, numbers, or underscores. - /// * Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). - /// * Can't be a word reserved by the specified database engine - /// Oracle - /// The Oracle System ID (SID) of the created DB instance. If you specify null, the default value ORCL is used. You can't specify the string NULL, or any other reserved word, for DBName. - /// Default: ORCL - /// Constraints: - /// * Can't be longer than 8 characters - /// Amazon RDS Custom for Oracle - /// The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL. - /// Default: ORCL - /// Constraints: - /// * It must contain 1 to 8 alphanumeric characters. - /// * It must contain a letter. - /// * It can't be a word reserved by the database engine. - /// Amazon RDS Custom for SQL Server - /// Not applicable. Must be null. - /// SQL Server - /// Not applicable. Must be null. - /// Amazon Aurora MySQL - /// The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster. - /// Constraints: - /// * It must contain 1 to 64 alphanumeric characters. - /// * It can't be a word reserved by the database engine. - /// Amazon Aurora PostgreSQL - /// The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster. - /// Constraints: - /// * It must contain 1 to 63 alphanumeric characters. - /// * It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). - /// * It can't be a word reserved by the database engine. + /// The meaning of this parameter differs according to the database engine you + /// use. + /// + /// + /// MySQL + /// + /// + /// The name of the database to create when the DB instance is created. If this + /// parameter isn't specified, no database is created in the DB instance. + /// + /// + /// Constraints: + /// + /// + /// * Must contain 1 to 64 letters or numbers. + /// + /// + /// * Must begin with a letter. Subsequent characters can be letters, underscores, + /// or digits (0-9). + /// + /// + /// * Can't be a word reserved by the specified database engine + /// + /// + /// MariaDB + /// + /// + /// The name of the database to create when the DB instance is created. 
If this + /// parameter isn't specified, no database is created in the DB instance. + /// + /// + /// Constraints: + /// + /// + /// * Must contain 1 to 64 letters or numbers. + /// + /// + /// * Must begin with a letter. Subsequent characters can be letters, underscores, + /// or digits (0-9). + /// + /// + /// * Can't be a word reserved by the specified database engine + /// + /// + /// PostgreSQL + /// + /// + /// The name of the database to create when the DB instance is created. If this + /// parameter isn't specified, a database named postgres is created in the DB + /// instance. + /// + /// + /// Constraints: + /// + /// + /// * Must contain 1 to 63 letters, numbers, or underscores. + /// + /// + /// * Must begin with a letter. Subsequent characters can be letters, underscores, + /// or digits (0-9). + /// + /// + /// * Can't be a word reserved by the specified database engine + /// + /// + /// Oracle + /// + /// + /// The Oracle System ID (SID) of the created DB instance. If you specify null, + /// the default value ORCL is used. You can't specify the string NULL, or any + /// other reserved word, for DBName. + /// + /// + /// Default: ORCL + /// + /// + /// Constraints: + /// + /// + /// * Can't be longer than 8 characters + /// + /// + /// Amazon RDS Custom for Oracle + /// + /// + /// The Oracle System ID (SID) of the created RDS Custom DB instance. If you + /// don't specify a value, the default value is ORCL. + /// + /// + /// Default: ORCL + /// + /// + /// Constraints: + /// + /// + /// * It must contain 1 to 8 alphanumeric characters. + /// + /// + /// * It must contain a letter. + /// + /// + /// * It can't be a word reserved by the database engine. + /// + /// + /// Amazon RDS Custom for SQL Server + /// + /// + /// Not applicable. Must be null. + /// + /// + /// SQL Server + /// + /// + /// Not applicable. Must be null. + /// + /// + /// Amazon Aurora MySQL + /// + /// + /// The name of the database to create when the primary DB instance of the Aurora + /// MySQL DB cluster is created. If this parameter isn't specified for an Aurora + /// MySQL DB cluster, no database is created in the DB cluster. + /// + /// + /// Constraints: + /// + /// + /// * It must contain 1 to 64 alphanumeric characters. + /// + /// + /// * It can't be a word reserved by the database engine. + /// + /// + /// Amazon Aurora PostgreSQL + /// + /// + /// The name of the database to create when the primary DB instance of the Aurora + /// PostgreSQL DB cluster is created. If this parameter isn't specified for an + /// Aurora PostgreSQL DB cluster, a database named postgres is created in the + /// DB cluster. + /// + /// + /// Constraints: + /// + /// + /// * It must contain 1 to 63 alphanumeric characters. + /// + /// + /// * It must begin with a letter. Subsequent characters can be letters, underscores, + /// or digits (0 to 9). + /// + /// + /// * It can't be a word reserved by the database engine. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbName")] pub db_name: Option, - /// The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used. - /// This setting doesn't apply to RDS Custom. - /// Constraints: - /// * It must be 1 to 255 letters, numbers, or hyphens. - /// * The first character must be a letter. - /// * It can't end with a hyphen or contain two consecutive hyphens. + /// The name of the DB parameter group to associate with this DB instance. 
If + /// you do not specify a value, then the default DB parameter group for the specified + /// DB engine and version is used. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Constraints: + /// + /// + /// * It must be 1 to 255 letters, numbers, or hyphens. + /// + /// + /// * The first character must be a letter. + /// + /// + /// * It can't end with a hyphen or contain two consecutive hyphens. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbParameterGroupName")] pub db_parameter_group_name: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbParameterGroupRef")] pub db_parameter_group_ref: Option, - /// The identifier for the DB snapshot to restore from. - /// Constraints: - /// * Must match the identifier of an existing DBSnapshot. - /// * Can't be specified when DBClusterSnapshotIdentifier is specified. - /// * Must be specified when DBClusterSnapshotIdentifier isn't specified. - /// * If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. + /// The identifier for the DB snapshot to restore from. + /// + /// + /// Constraints: + /// + /// + /// * Must match the identifier of an existing DBSnapshot. + /// + /// + /// * Can't be specified when DBClusterSnapshotIdentifier is specified. + /// + /// + /// * Must be specified when DBClusterSnapshotIdentifier isn't specified. + /// + /// + /// * If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + /// must be the ARN of the shared DB snapshot. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSnapshotIdentifier")] pub db_snapshot_identifier: Option, - /// A DB subnet group to associate with this DB instance. - /// Constraints: Must match the name of an existing DBSubnetGroup. Must not be default. - /// Example: mydbsubnetgroup + /// A DB subnet group to associate with this DB instance. + /// + /// + /// Constraints: Must match the name of an existing DBSubnetGroup. Must not be + /// default. + /// + /// + /// Example: mydbsubnetgroup #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroupName")] pub db_subnet_group_name: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroupRef")] pub db_subnet_group_ref: Option, - /// A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). 
- /// Amazon Aurora - /// Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster. + /// A value that indicates whether the DB instance has deletion protection enabled. + /// The database can't be deleted when deletion protection is enabled. By default, + /// deletion protection isn't enabled. For more information, see Deleting a DB + /// Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. You can enable or disable deletion protection for the DB + /// cluster. For more information, see CreateDBCluster. DB instances in a DB + /// cluster can be deleted even when deletion protection is enabled for the DB + /// cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletionProtection")] pub deletion_protection: Option, /// DestinationRegion is used for presigning the request to a given region. #[serde(default, skip_serializing_if = "Option::is_none", rename = "destinationRegion")] pub destination_region: Option, - /// The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain. - /// For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the Amazon RDS User Guide. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. The domain is managed by the DB cluster. + /// The Active Directory directory ID to create the DB instance in. Currently, + /// only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can + /// be created in an Active Directory Domain. + /// + /// + /// For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The domain is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none")] pub domain: Option, - /// Specify the name of the IAM role to be used when making API calls to the Directory Service. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. The domain is managed by the DB cluster. + /// Specify the name of the IAM role to be used when making API calls to the + /// Directory Service. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The domain is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainIAMRoleName")] pub domain_iam_role_name: Option, - /// The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine. For more information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the Amazon RDS User Guide. - /// Amazon Aurora - /// Not applicable. CloudWatch Logs exports are managed by the DB cluster. - /// RDS Custom - /// Not applicable. 
- /// MariaDB - /// Possible values are audit, error, general, and slowquery. - /// Microsoft SQL Server - /// Possible values are agent and error. - /// MySQL - /// Possible values are audit, error, general, and slowquery. - /// Oracle - /// Possible values are alert, audit, listener, trace, and oemagent. - /// PostgreSQL - /// Possible values are postgresql and upgrade. + /// The list of log types that need to be enabled for exporting to CloudWatch + /// Logs. The values in the list depend on the DB engine. For more information, + /// see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + /// in the Amazon RDS User Guide. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. CloudWatch Logs exports are managed by the DB cluster. + /// + /// + /// RDS Custom + /// + /// + /// Not applicable. + /// + /// + /// MariaDB + /// + /// + /// Possible values are audit, error, general, and slowquery. + /// + /// + /// Microsoft SQL Server + /// + /// + /// Possible values are agent and error. + /// + /// + /// MySQL + /// + /// + /// Possible values are audit, error, general, and slowquery. + /// + /// + /// Oracle + /// + /// + /// Possible values are alert, audit, listener, trace, and oemagent. + /// + /// + /// PostgreSQL + /// + /// + /// Possible values are postgresql and upgrade. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableCloudwatchLogsExports")] pub enable_cloudwatch_logs_exports: Option>, - /// A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance. - /// A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. - /// For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. - /// For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) in the Amazon Web Services Outposts User Guide. + /// A value that indicates whether to enable a customer-owned IP address (CoIP) + /// for an RDS on Outposts DB instance. + /// + /// + /// A CoIP provides local or external connectivity to resources in your Outpost + /// subnets through your on-premises network. For some use cases, a CoIP can + /// provide lower latency for connections to the DB instance from outside of + /// its virtual private cloud (VPC) on your local network. + /// + /// + /// For more information about RDS on Outposts, see Working with Amazon RDS on + /// Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + /// in the Amazon RDS User Guide. + /// + /// + /// For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) + /// in the Amazon Web Services Outposts User Guide. 
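+ ///
+ ///
+ /// Example (added for illustration; not part of the upstream AWS documentation):
+ /// a minimal manifest sketch, assuming the rds.services.k8s.aws/v1alpha1
+ /// DBInstance kind that this spec struct appears to be generated for. The
+ /// dbInstanceIdentifier, dbInstanceClass, and allocatedStorage fields are not
+ /// shown in this hunk and are assumed from the same spec; all values are
+ /// hypothetical.
+ ///
+ /// ```yaml
+ /// apiVersion: rds.services.k8s.aws/v1alpha1
+ /// kind: DBInstance
+ /// metadata:
+ ///   name: outposts-mysql                        # hypothetical name
+ /// spec:
+ ///   dbInstanceIdentifier: outposts-mysql        # assumed field, not in this hunk
+ ///   engine: mysql
+ ///   dbInstanceClass: db.m5.large                # assumed field and value
+ ///   allocatedStorage: 100                       # assumed field, size in GiB
+ ///   dbSubnetGroupName: my-outpost-subnet-group  # subnet group on the Outpost
+ ///   enableCustomerOwnedIP: true                 # the CoIP switch documented above
+ /// ```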
#[serde(default, skip_serializing_if = "Option::is_none", rename = "enableCustomerOwnedIP")] pub enable_customer_owned_ip: Option, - /// A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. - /// For more information, see IAM Database Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) in the Amazon RDS User Guide. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster. + /// A value that indicates whether to enable mapping of Amazon Web Services Identity + /// and Access Management (IAM) accounts to database accounts. By default, mapping + /// isn't enabled. + /// + /// + /// For more information, see IAM Database Authentication for MySQL and PostgreSQL + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. Mapping Amazon Web Services IAM accounts to database accounts + /// is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableIAMDatabaseAuthentication")] pub enable_iam_database_authentication: Option, - /// The name of the database engine to be used for this instance. - /// Not every database engine is available for every Amazon Web Services Region. - /// Valid Values: - /// * aurora (for MySQL 5.6-compatible Aurora) - /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) - /// * aurora-postgresql - /// * custom-oracle-ee (for RDS Custom for Oracle instances) - /// * custom-sqlserver-ee (for RDS Custom for SQL Server instances) - /// * custom-sqlserver-se (for RDS Custom for SQL Server instances) - /// * custom-sqlserver-web (for RDS Custom for SQL Server instances) - /// * mariadb - /// * mysql - /// * oracle-ee - /// * oracle-ee-cdb - /// * oracle-se2 - /// * oracle-se2-cdb - /// * postgres - /// * sqlserver-ee - /// * sqlserver-se - /// * sqlserver-ex - /// * sqlserver-web + /// The name of the database engine to be used for this instance. + /// + /// + /// Not every database engine is available for every Amazon Web Services Region. + /// + /// + /// Valid Values: + /// + /// + /// * aurora (for MySQL 5.6-compatible Aurora) + /// + /// + /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) + /// + /// + /// * aurora-postgresql + /// + /// + /// * custom-oracle-ee (for RDS Custom for Oracle instances) + /// + /// + /// * custom-sqlserver-ee (for RDS Custom for SQL Server instances) + /// + /// + /// * custom-sqlserver-se (for RDS Custom for SQL Server instances) + /// + /// + /// * custom-sqlserver-web (for RDS Custom for SQL Server instances) + /// + /// + /// * mariadb + /// + /// + /// * mysql + /// + /// + /// * oracle-ee + /// + /// + /// * oracle-ee-cdb + /// + /// + /// * oracle-se2 + /// + /// + /// * oracle-se2-cdb + /// + /// + /// * postgres + /// + /// + /// * sqlserver-ee + /// + /// + /// * sqlserver-se + /// + /// + /// * sqlserver-ex + /// + /// + /// * sqlserver-web pub engine: String, - /// The version number of the database engine to use. - /// For a list of valid engine versions, use the DescribeDBEngineVersions operation. 
- /// The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region. - /// Amazon Aurora - /// Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster. - /// Amazon RDS Custom for Oracle - /// A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-creating.html#custom-creating.create) in the Amazon RDS User Guide. - /// Amazon RDS Custom for SQL Server - /// See RDS Custom for SQL Server general requirements (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-reqs-limits-MS.html) in the Amazon RDS User Guide. - /// MariaDB - /// For information, see MariaDB on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt) in the Amazon RDS User Guide. - /// Microsoft SQL Server - /// For information, see Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) in the Amazon RDS User Guide. - /// MySQL - /// For information, see MySQL on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the Amazon RDS User Guide. - /// Oracle - /// For information, see Oracle Database Engine Release Notes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html) in the Amazon RDS User Guide. - /// PostgreSQL - /// For information, see Amazon RDS for PostgreSQL versions and extensions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the Amazon RDS User Guide. + /// The version number of the database engine to use. + /// + /// + /// For a list of valid engine versions, use the DescribeDBEngineVersions operation. + /// + /// + /// The following are the database engines and links to information about the + /// major and minor versions that are available with Amazon RDS. Not every database + /// engine is available for every Amazon Web Services Region. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The version number of the database engine to be used by the + /// DB instance is managed by the DB cluster. + /// + /// + /// Amazon RDS Custom for Oracle + /// + /// + /// A custom engine version (CEV) that you have previously created. This setting + /// is required for RDS Custom for Oracle. The CEV name has the following format: + /// 19.customized_string. A valid CEV name is 19.my_cev1. For more information, + /// see Creating an RDS Custom for Oracle DB instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-creating.html#custom-creating.create) + /// in the Amazon RDS User Guide. + /// + /// + /// Amazon RDS Custom for SQL Server + /// + /// + /// See RDS Custom for SQL Server general requirements (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-reqs-limits-MS.html) + /// in the Amazon RDS User Guide. 
+ /// + /// + /// MariaDB + /// + /// + /// For information, see MariaDB on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt) + /// in the Amazon RDS User Guide. + /// + /// + /// Microsoft SQL Server + /// + /// + /// For information, see Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) + /// in the Amazon RDS User Guide. + /// + /// + /// MySQL + /// + /// + /// For information, see MySQL on Amazon RDS Versions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) + /// in the Amazon RDS User Guide. + /// + /// + /// Oracle + /// + /// + /// For information, see Oracle Database Engine Release Notes (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html) + /// in the Amazon RDS User Guide. + /// + /// + /// PostgreSQL + /// + /// + /// For information, see Amazon RDS for PostgreSQL versions and extensions (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "engineVersion")] pub engine_version: Option, - /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) in the Amazon RDS User Guide. - /// Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 of the storage amount for the DB instance. - /// Amazon Aurora - /// Not applicable. Storage is managed by the DB cluster. + /// The amount of Provisioned IOPS (input/output operations per second) to be + /// initially allocated for the DB instance. For information about valid IOPS + /// values, see Amazon RDS DB instance storage (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html) + /// in the Amazon RDS User Guide. + /// + /// + /// Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must + /// be a multiple between .5 and 50 of the storage amount for the DB instance. + /// For SQL Server DB instances, must be a multiple between 1 and 50 of the storage + /// amount for the DB instance. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. Storage is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none")] pub iops: Option, - /// The Amazon Web Services KMS key identifier for an encrypted DB instance. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. - /// Amazon Aurora - /// Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster. - /// If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
- /// Amazon RDS Custom - /// A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key. + /// The Amazon Web Services KMS key identifier for an encrypted DB instance. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. To use a KMS key in a different Amazon + /// Web Services account, specify the key ARN or alias ARN. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The Amazon Web Services KMS key identifier is managed by + /// the DB cluster. For more information, see CreateDBCluster. + /// + /// + /// If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId + /// parameter, then Amazon RDS uses your default KMS key. There is a default + /// KMS key for your Amazon Web Services account. Your Amazon Web Services account + /// has a different default KMS key for each Amazon Web Services Region. + /// + /// + /// Amazon RDS Custom + /// + /// + /// A KMS key is required for RDS Custom instances. For most RDS engines, if + /// you leave this parameter empty while enabling StorageEncrypted, the engine + /// uses the default KMS key. However, RDS Custom doesn't use the default key + /// when this parameter is empty. You must explicitly specify a key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyRef")] pub kms_key_ref: Option, - /// License model information for this DB instance. - /// Valid values: license-included | bring-your-own-license | general-public-license - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. + /// License model information for this DB instance. + /// + /// + /// Valid values: license-included | bring-your-own-license | general-public-license + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "licenseModel")] pub license_model: Option, - /// A value that indicates whether to manage the master user password with Amazon Web Services Secrets Manager. - /// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide. - /// Constraints: - /// * Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. + /// A value that indicates whether to manage the master user password with Amazon + /// Web Services Secrets Manager. 
+ /// + /// + /// For more information, see Password management with Amazon Web Services Secrets + /// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + /// in the Amazon RDS User Guide. + /// + /// + /// Constraints: + /// + /// + /// * Can't manage the master user password with Amazon Web Services Secrets + /// Manager if MasterUserPassword is specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "manageMasterUserPassword")] pub manage_master_user_password: Option, - /// The password for the master user. The password can include any printable ASCII character except "/", """, or "@". - /// Amazon Aurora - /// Not applicable. The password for the master user is managed by the DB cluster. - /// Constraints: Can't be specified if ManageMasterUserPassword is turned on. - /// MariaDB - /// Constraints: Must contain from 8 to 41 characters. - /// Microsoft SQL Server - /// Constraints: Must contain from 8 to 128 characters. - /// MySQL - /// Constraints: Must contain from 8 to 41 characters. - /// Oracle - /// Constraints: Must contain from 8 to 30 characters. - /// PostgreSQL - /// Constraints: Must contain from 8 to 128 characters. + /// The password for the master user. The password can include any printable + /// ASCII character except "/", """, or "@". + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The password for the master user is managed by the DB cluster. + /// + /// + /// Constraints: Can't be specified if ManageMasterUserPassword is turned on. + /// + /// + /// MariaDB + /// + /// + /// Constraints: Must contain from 8 to 41 characters. + /// + /// + /// Microsoft SQL Server + /// + /// + /// Constraints: Must contain from 8 to 128 characters. + /// + /// + /// MySQL + /// + /// + /// Constraints: Must contain from 8 to 41 characters. + /// + /// + /// Oracle + /// + /// + /// Constraints: Must contain from 8 to 30 characters. + /// + /// + /// PostgreSQL + /// + /// + /// Constraints: Must contain from 8 to 128 characters. #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserPassword")] pub master_user_password: Option, - /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. - /// This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. - /// If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key. - /// There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. + /// The Amazon Web Services KMS key identifier to encrypt a secret that is automatically + /// generated and managed in Amazon Web Services Secrets Manager. + /// + /// + /// This setting is valid only if the master user password is managed by RDS + /// in Amazon Web Services Secrets Manager for the DB instance. 
+ /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. To use a KMS key in a different Amazon + /// Web Services account, specify the key ARN or alias ARN. + /// + /// + /// If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager + /// KMS key is used to encrypt the secret. If the secret is in a different Amazon + /// Web Services account, then you can't use the aws/secretsmanager KMS key to + /// encrypt the secret, and you must use a customer managed KMS key. + /// + /// + /// There is a default KMS key for your Amazon Web Services account. Your Amazon + /// Web Services account has a different default KMS key for each Amazon Web + /// Services Region. #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecretKMSKeyID")] pub master_user_secret_kms_key_id: Option, - /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: - /// from: name: my-api + /// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference + /// type to provide more user friendly syntax for references using 'from' field + /// Ex: + /// APIIDRef: + /// + /// + /// from: + /// name: my-api #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecretKMSKeyRef")] pub master_user_secret_kms_key_ref: Option, - /// The name for the master user. - /// Amazon Aurora - /// Not applicable. The name for the master user is managed by the DB cluster. - /// Amazon RDS - /// Constraints: - /// * Required. - /// * Must be 1 to 16 letters, numbers, or underscores. - /// * First character must be a letter. - /// * Can't be a reserved word for the chosen database engine. + /// The name for the master user. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The name for the master user is managed by the DB cluster. + /// + /// + /// Amazon RDS + /// + /// + /// Constraints: + /// + /// + /// * Required. + /// + /// + /// * Must be 1 to 16 letters, numbers, or underscores. + /// + /// + /// * First character must be a letter. + /// + /// + /// * Can't be a reserved word for the chosen database engine. #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUsername")] pub master_username: Option, - /// The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. - /// For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the Amazon RDS User Guide. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. Storage is managed by the DB cluster. + /// The upper limit in gibibytes (GiB) to which Amazon RDS can automatically + /// scale the storage of the DB instance. + /// + /// + /// For more information about this setting, including limitations that apply + /// to it, see Managing capacity automatically with Amazon RDS storage autoscaling + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. Storage is managed by the DB cluster. 
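+ ///
+ ///
+ /// Example (added for illustration; not upstream documentation): a spec
+ /// fragment sketch that pairs this upper limit with an initial
+ /// allocatedStorage, which is assumed from the same spec and not shown in
+ /// this hunk; the numbers are hypothetical.
+ ///
+ /// ```yaml
+ /// spec:
+ ///   allocatedStorage: 100      # assumed field: initial storage size in GiB
+ ///   maxAllocatedStorage: 1000  # RDS may autoscale storage up to this limit
+ /// ```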
#[serde(default, skip_serializing_if = "Option::is_none", rename = "maxAllocatedStorage")] pub max_allocated_storage: Option, - /// The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0. - /// If MonitoringRoleArn is specified, then you must set MonitoringInterval to a value other than 0. - /// This setting doesn't apply to RDS Custom. - /// Valid Values: 0, 1, 5, 10, 15, 30, 60 + /// The interval, in seconds, between points when Enhanced Monitoring metrics + /// are collected for the DB instance. To disable collection of Enhanced Monitoring + /// metrics, specify 0. The default is 0. + /// + /// + /// If MonitoringRoleArn is specified, then you must set MonitoringInterval to + /// a value other than 0. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Valid Values: 0, 1, 5, 10, 15, 30, 60 #[serde(default, skip_serializing_if = "Option::is_none", rename = "monitoringInterval")] pub monitoring_interval: Option, - /// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. For information on creating a monitoring role, see Setting Up and Enabling Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the Amazon RDS User Guide. - /// If MonitoringInterval is set to a value other than 0, then you must supply a MonitoringRoleArn value. - /// This setting doesn't apply to RDS Custom. + /// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics + /// to Amazon CloudWatch Logs. For example, arn:aws:iam:123456789012:role/emaccess. + /// For information on creating a monitoring role, see Setting Up and Enabling + /// Enhanced Monitoring (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) + /// in the Amazon RDS User Guide. + /// + /// + /// If MonitoringInterval is set to a value other than 0, then you must supply + /// a MonitoringRoleArn value. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "monitoringRoleARN")] pub monitoring_role_arn: Option, - /// A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster. + /// A value that indicates whether the DB instance is a Multi-AZ deployment. + /// You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ + /// deployment. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. DB instance Availability Zones (AZs) are managed by the DB + /// cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiAZ")] pub multi_az: Option, - /// The name of the NCHAR character set for the Oracle DB instance. - /// This parameter doesn't apply to RDS Custom. + /// The name of the NCHAR character set for the Oracle DB instance. + /// + /// + /// This parameter doesn't apply to RDS Custom. 
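+ ///
+ ///
+ /// Example (added for illustration; not upstream documentation): a spec
+ /// fragment sketch for an Oracle instance. AL16UTF16 is assumed here as a
+ /// commonly used national character set; check which values your engine
+ /// version actually supports.
+ ///
+ /// ```yaml
+ /// spec:
+ ///   engine: oracle-se2                # one of the Oracle values listed in the engine field's documentation
+ ///   ncharCharacterSetName: AL16UTF16  # assumed value for the NCHAR character set
+ /// ```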
#[serde(default, skip_serializing_if = "Option::is_none", rename = "ncharCharacterSetName")] pub nchar_character_set_name: Option, - /// The network type of the DB instance. - /// Valid values: - /// * IPV4 - /// * DUAL - /// The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon RDS User Guide. + /// The network type of the DB instance. + /// + /// + /// Valid values: + /// + /// + /// * IPV4 + /// + /// + /// * DUAL + /// + /// + /// The network type is determined by the DBSubnetGroup specified for the DB + /// instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 + /// and the IPv6 protocols (DUAL). + /// + /// + /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkType")] pub network_type: Option, - /// A value that indicates that the DB instance should be associated with the specified option group. - /// Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance after it is associated with a DB instance. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. + /// A value that indicates that the DB instance should be associated with the + /// specified option group. + /// + /// + /// Permanent options, such as the TDE option for Oracle Advanced Security TDE, + /// can't be removed from an option group. Also, that option group can't be removed + /// from a DB instance after it is associated with a DB instance. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "optionGroupName")] pub option_group_name: Option, - /// A value that indicates whether to enable Performance Insights for the DB instance. For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the Amazon RDS User Guide. - /// This setting doesn't apply to RDS Custom. + /// A value that indicates whether to enable Performance Insights for the DB + /// instance. For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) + /// in the Amazon RDS User Guide. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsEnabled")] pub performance_insights_enabled: Option, - /// The Amazon Web Services KMS key identifier for encryption of Performance Insights data. - /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. - /// If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. 
- /// This setting doesn't apply to RDS Custom. + /// The Amazon Web Services KMS key identifier for encryption of Performance + /// Insights data. + /// + /// + /// The Amazon Web Services KMS key identifier is the key ARN, key ID, alias + /// ARN, or alias name for the KMS key. + /// + /// + /// If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + /// RDS uses your default KMS key. There is a default KMS key for your Amazon + /// Web Services account. Your Amazon Web Services account has a different default + /// KMS key for each Amazon Web Services Region. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsKMSKeyID")] pub performance_insights_kms_key_id: Option, - /// The number of days to retain Performance Insights data. The default is 7 days. The following values are valid: - /// * 7 - /// * month * 31, where month is a number of months from 1-23 - /// * 731 - /// For example, the following values are valid: - /// * 93 (3 months * 31) - /// * 341 (11 months * 31) - /// * 589 (19 months * 31) - /// * 731 - /// If you specify a retention period such as 94, which isn't a valid value, RDS issues an error. - /// This setting doesn't apply to RDS Custom. + /// The number of days to retain Performance Insights data. The default is 7 + /// days. The following values are valid: + /// + /// + /// * 7 + /// + /// + /// * month * 31, where month is a number of months from 1-23 + /// + /// + /// * 731 + /// + /// + /// For example, the following values are valid: + /// + /// + /// * 93 (3 months * 31) + /// + /// + /// * 341 (11 months * 31) + /// + /// + /// * 589 (19 months * 31) + /// + /// + /// * 731 + /// + /// + /// If you specify a retention period such as 94, which isn't a valid value, + /// RDS issues an error. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "performanceInsightsRetentionPeriod")] pub performance_insights_retention_period: Option, - /// The port number on which the database accepts connections. - /// MySQL - /// Default: 3306 - /// Valid values: 1150-65535 - /// Type: Integer - /// MariaDB - /// Default: 3306 - /// Valid values: 1150-65535 - /// Type: Integer - /// PostgreSQL - /// Default: 5432 - /// Valid values: 1150-65535 - /// Type: Integer - /// Oracle - /// Default: 1521 - /// Valid values: 1150-65535 - /// SQL Server - /// Default: 1433 - /// Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and 49152-49156. - /// Amazon Aurora - /// Default: 3306 - /// Valid values: 1150-65535 - /// Type: Integer + /// The port number on which the database accepts connections. + /// + /// + /// MySQL + /// + /// + /// Default: 3306 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// Type: Integer + /// + /// + /// MariaDB + /// + /// + /// Default: 3306 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// Type: Integer + /// + /// + /// PostgreSQL + /// + /// + /// Default: 5432 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// Type: Integer + /// + /// + /// Oracle + /// + /// + /// Default: 1521 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// SQL Server + /// + /// + /// Default: 1433 + /// + /// + /// Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and + /// 49152-49156. 
+ /// + /// + /// Amazon Aurora + /// + /// + /// Default: 3306 + /// + /// + /// Valid values: 1150-65535 + /// + /// + /// Type: Integer #[serde(default, skip_serializing_if = "Option::is_none")] pub port: Option, - /// When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API operation in the source Amazon Web Services Region that contains the source DB instance. - /// This setting applies only to Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions. - /// This setting applies only when replicating from a source DB instance. Source DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions and China Amazon Web Services Regions. - /// You must specify this parameter when you create an encrypted read replica from another Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are creating an encrypted read replica in the same Amazon Web Services Region. - /// The presigned URL must be a valid request for the CreateDBInstanceReadReplica API operation that can run in the source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL request must contain the following parameter values: - /// * DestinationRegion - The Amazon Web Services Region that the encrypted read replica is created in. This Amazon Web Services Region is the same one where the CreateDBInstanceReadReplica operation is called that contains this presigned URL. For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica operation in the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the CreateDBInstanceReadReplica operation in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion in the presigned URL must be set to the us-east-1 Amazon Web Services Region. - /// * KmsKeyId - The KMS key identifier for the key to use to encrypt the read replica in the destination Amazon Web Services Region. This is the same identifier for both the CreateDBInstanceReadReplica operation that is called in the destination Amazon Web Services Region, and the operation contained in the presigned URL. - /// * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB instance to be replicated. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. - /// To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). 
- /// If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region. - /// SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server doesn't support cross-Region read replicas. - /// This setting doesn't apply to RDS Custom. + /// When you are creating a read replica from one Amazon Web Services GovCloud + /// (US) Region to another or from one China Amazon Web Services Region to another, + /// the URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica + /// API operation in the source Amazon Web Services Region that contains the + /// source DB instance. + /// + /// + /// This setting applies only to Amazon Web Services GovCloud (US) Regions and + /// China Amazon Web Services Regions. It's ignored in other Amazon Web Services + /// Regions. + /// + /// + /// This setting applies only when replicating from a source DB instance. Source + /// DB clusters aren't supported in Amazon Web Services GovCloud (US) Regions + /// and China Amazon Web Services Regions. + /// + /// + /// You must specify this parameter when you create an encrypted read replica + /// from another Amazon Web Services Region by using the Amazon RDS API. Don't + /// specify PreSignedUrl when you are creating an encrypted read replica in the + /// same Amazon Web Services Region. + /// + /// + /// The presigned URL must be a valid request for the CreateDBInstanceReadReplica + /// API operation that can run in the source Amazon Web Services Region that + /// contains the encrypted source DB instance. The presigned URL request must + /// contain the following parameter values: + /// + /// + /// * DestinationRegion - The Amazon Web Services Region that the encrypted + /// read replica is created in. This Amazon Web Services Region is the same + /// one where the CreateDBInstanceReadReplica operation is called that contains + /// this presigned URL. For example, if you create an encrypted DB instance + /// in the us-west-1 Amazon Web Services Region, from a source DB instance + /// in the us-east-2 Amazon Web Services Region, then you call the CreateDBInstanceReadReplica + /// operation in the us-east-1 Amazon Web Services Region and provide a presigned + /// URL that contains a call to the CreateDBInstanceReadReplica operation + /// in the us-west-2 Amazon Web Services Region. For this example, the DestinationRegion + /// in the presigned URL must be set to the us-east-1 Amazon Web Services + /// Region. + /// + /// + /// * KmsKeyId - The KMS key identifier for the key to use to encrypt the + /// read replica in the destination Amazon Web Services Region. This is the + /// same identifier for both the CreateDBInstanceReadReplica operation that + /// is called in the destination Amazon Web Services Region, and the operation + /// contained in the presigned URL. + /// + /// + /// * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted + /// DB instance to be replicated. This identifier must be in the Amazon Resource + /// Name (ARN) format for the source Amazon Web Services Region. 
For example, + /// if you are creating an encrypted read replica from a DB instance in the + /// us-west-2 Amazon Web Services Region, then your SourceDBInstanceIdentifier + /// looks like the following example: arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. + /// + /// + /// To learn how to generate a Signature Version 4 signed request, see Authenticating + /// Requests: Using Query Parameters (Amazon Web Services Signature Version 4) + /// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) + /// and Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + /// + /// + /// If you are using an Amazon Web Services SDK tool or the CLI, you can specify + /// SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl + /// manually. Specifying SourceRegion autogenerates a presigned URL that is a + /// valid request for the operation that can run in the source Amazon Web Services + /// Region. + /// + /// + /// SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server + /// doesn't support cross-Region read replicas. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preSignedURL")] pub pre_signed_url: Option, - /// The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. For more information, see Backup window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) in the Amazon RDS User Guide. - /// Amazon Aurora - /// Not applicable. The daily time range for creating automated backups is managed by the DB cluster. - /// Constraints: - /// * Must be in the format hh24:mi-hh24:mi. - /// * Must be in Universal Coordinated Time (UTC). - /// * Must not conflict with the preferred maintenance window. - /// * Must be at least 30 minutes. + /// The daily time range during which automated backups are created if automated + /// backups are enabled, using the BackupRetentionPeriod parameter. The default + /// is a 30-minute window selected at random from an 8-hour block of time for + /// each Amazon Web Services Region. For more information, see Backup window + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) + /// in the Amazon RDS User Guide. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The daily time range for creating automated backups is managed + /// by the DB cluster. + /// + /// + /// Constraints: + /// + /// + /// * Must be in the format hh24:mi-hh24:mi. + /// + /// + /// * Must be in Universal Coordinated Time (UTC). + /// + /// + /// * Must not conflict with the preferred maintenance window. + /// + /// + /// * Must be at least 30 minutes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredBackupWindow")] pub preferred_backup_window: Option, - /// The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance). 
- /// Format: ddd:hh24:mi-ddd:hh24:mi - /// The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. - /// Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - /// Constraints: Minimum 30-minute window. + /// The time range each week during which system maintenance can occur, in Universal + /// Coordinated Time (UTC). For more information, see Amazon RDS Maintenance + /// Window (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance). + /// + /// + /// Format: ddd:hh24:mi-ddd:hh24:mi + /// + /// + /// The default is a 30-minute window selected at random from an 8-hour block + /// of time for each Amazon Web Services Region, occurring on a random day of + /// the week. + /// + /// + /// Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + /// + /// + /// Constraints: Minimum 30-minute window. #[serde(default, skip_serializing_if = "Option::is_none", rename = "preferredMaintenanceWindow")] pub preferred_maintenance_window: Option, - /// The number of CPU cores and the number of threads per core for the DB instance class of the DB instance. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. + /// The number of CPU cores and the number of threads per core for the DB instance + /// class of the DB instance. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "processorFeatures")] pub processor_features: Option>, - /// A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) in the Amazon Aurora User Guide. - /// This setting doesn't apply to RDS Custom. - /// Default: 1 - /// Valid Values: 0 - 15 + /// A value that specifies the order in which an Aurora Replica is promoted to + /// the primary instance after a failure of the existing primary instance. For + /// more information, see Fault Tolerance for an Aurora DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.FaultTolerance) + /// in the Amazon Aurora User Guide. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Default: 1 + /// + /// + /// Valid Values: 0 - 15 #[serde(default, skip_serializing_if = "Option::is_none", rename = "promotionTier")] pub promotion_tier: Option, - /// A value that indicates whether the DB instance is publicly accessible. - /// When the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB instance's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB instance's VPC. Access to the DB instance is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB instance doesn't permit it. - /// When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address. - /// Default: The default behavior varies depending on whether DBSubnetGroupName is specified. 
- /// If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: - /// * If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB instance is private. - /// * If the default VPC in the target Region has an internet gateway attached to it, the DB instance is public. - /// If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: - /// * If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB instance is private. - /// * If the subnets are part of a VPC that has an internet gateway attached to it, the DB instance is public. + /// A value that indicates whether the DB instance is publicly accessible. + /// + /// + /// When the DB instance is publicly accessible, its Domain Name System (DNS) + /// endpoint resolves to the private IP address from within the DB instance's + /// virtual private cloud (VPC). It resolves to the public IP address from outside + /// of the DB instance's VPC. Access to the DB instance is ultimately controlled + /// by the security group it uses. That public access is not permitted if the + /// security group assigned to the DB instance doesn't permit it. + /// + /// + /// When the DB instance isn't publicly accessible, it is an internal DB instance + /// with a DNS name that resolves to a private IP address. + /// + /// + /// Default: The default behavior varies depending on whether DBSubnetGroupName + /// is specified. + /// + /// + /// If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, + /// the following applies: + /// + /// + /// * If the default VPC in the target Region doesn’t have an internet gateway + /// attached to it, the DB instance is private. + /// + /// + /// * If the default VPC in the target Region has an internet gateway attached + /// to it, the DB instance is public. + /// + /// + /// If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, + /// the following applies: + /// + /// + /// * If the subnets are part of a VPC that doesn’t have an internet gateway + /// attached to it, the DB instance is private. + /// + /// + /// * If the subnets are part of a VPC that has an internet gateway attached + /// to it, the DB instance is public. #[serde(default, skip_serializing_if = "Option::is_none", rename = "publiclyAccessible")] pub publicly_accessible: Option, - /// The open mode of the replica database: mounted or read-only. - /// This parameter is only supported for Oracle DB instances. - /// Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. - /// You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) in the Amazon RDS User Guide. - /// For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually. + /// The open mode of the replica database: mounted or read-only. + /// + /// + /// This parameter is only supported for Oracle DB instances. 
+ /// + /// + /// Mounted DB replicas are included in Oracle Database Enterprise Edition. The + /// main use case for mounted replicas is cross-Region disaster recovery. The + /// primary database doesn't use Active Data Guard to transmit information to + /// the mounted replica. Because it doesn't accept user connections, a mounted + /// replica can't serve a read-only workload. + /// + /// + /// You can create a combination of mounted and read-only DB replicas for the + /// same primary DB instance. For more information, see Working with Oracle Read + /// Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) + /// in the Amazon RDS User Guide. + /// + /// + /// For RDS Custom, you must specify this parameter and set it to mounted. The + /// value won't be set by default. After replica creation, you can manage the + /// open mode manually. #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicaMode")] pub replica_mode: Option, - /// The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five. - /// Constraints: - /// * Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL Server DB instance. - /// * Can't be specified if the SourceDBClusterIdentifier parameter is also specified. - /// * For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses) in the Amazon RDS User Guide. - /// * For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations) in the Amazon RDS User Guide. - /// * The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0. - /// * If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier. - /// * If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas. + /// The identifier of the DB instance that will act as the source for the read + /// replica. Each DB instance can have up to 15 read replicas, with the exception + /// of Oracle and SQL Server, which can have up to five. + /// + /// + /// Constraints: + /// + /// + /// * Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, + /// or SQL Server DB instance. + /// + /// + /// * Can't be specified if the SourceDBClusterIdentifier parameter is also + /// specified. + /// + /// + /// * For the limitations of Oracle read replicas, see Version and licensing + /// considerations for RDS for Oracle replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.limitations.html#oracle-read-replicas.limitations.versions-and-licenses) + /// in the Amazon RDS User Guide. 
+ /// + /// + /// * For the limitations of SQL Server read replicas, see Read replica limitations + /// with SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.html#SQLServer.ReadReplicas.Limitations) + /// in the Amazon RDS User Guide. + /// + /// + /// * The specified DB instance must have automatic backups enabled, that + /// is, its backup retention period must be greater than 0. + /// + /// + /// * If the source DB instance is in the same Amazon Web Services Region + /// as the read replica, specify a valid DB instance identifier. + /// + /// + /// * If the source DB instance is in a different Amazon Web Services Region + /// from the read replica, specify a valid DB instance ARN. For more information, + /// see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) + /// in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS + /// Custom, which don't support cross-Region replicas. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceDBInstanceIdentifier")] pub source_db_instance_identifier: Option, - /// SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN. + /// SourceRegion is the source region where the resource exists. This is not + /// sent over the wire and is only used for presigning. This value should always + /// have the same region as the source ARN. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceRegion")] pub source_region: Option, - /// A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted. - /// For RDS Custom instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error. - /// Amazon Aurora - /// Not applicable. The encryption for DB instances is managed by the DB cluster. + /// A value that indicates whether the DB instance is encrypted. By default, + /// it isn't encrypted. + /// + /// + /// For RDS Custom instances, either set this parameter to true or leave it unset. + /// If you set this parameter to false, RDS reports an error. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The encryption for DB instances is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageEncrypted")] pub storage_encrypted: Option, - /// Specifies the storage throughput value for the DB instance. - /// This setting applies only to the gp3 storage type. - /// This setting doesn't apply to RDS Custom or Amazon Aurora. + /// Specifies the storage throughput value for the DB instance. + /// + /// + /// This setting applies only to the gp3 storage type. + /// + /// + /// This setting doesn't apply to RDS Custom or Amazon Aurora. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageThroughput")] pub storage_throughput: Option, - /// Specifies the storage type to be associated with the DB instance. - /// Valid values: gp2 | gp3 | io1 | standard - /// If you specify io1 or gp3, you must also include a value for the Iops parameter. - /// Default: io1 if the Iops parameter is specified, otherwise gp2 - /// Amazon Aurora - /// Not applicable. Storage is managed by the DB cluster. + /// Specifies the storage type to be associated with the DB instance. 
+ /// + /// + /// Valid values: gp2 | gp3 | io1 | standard + /// + /// + /// If you specify io1 or gp3, you must also include a value for the Iops parameter. + /// + /// + /// Default: io1 if the Iops parameter is specified, otherwise gp2 + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. Storage is managed by the DB cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageType")] pub storage_type: Option, /// Tags to assign to the DB instance. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The ARN from the key store with which to associate the instance for TDE encryption. - /// This setting doesn't apply to RDS Custom. - /// Amazon Aurora - /// Not applicable. + /// The ARN from the key store with which to associate the instance for TDE encryption. + /// + /// + /// This setting doesn't apply to RDS Custom. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tdeCredentialARN")] pub tde_credential_arn: Option, - /// The password for the given ARN from the key store in order to access the device. - /// This setting doesn't apply to RDS Custom. + /// The password for the given ARN from the key store in order to access the + /// device. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tdeCredentialPassword")] pub tde_credential_password: Option, - /// The time zone of the DB instance. The time zone parameter is currently supported only by Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone). + /// The time zone of the DB instance. The time zone parameter is currently supported + /// only by Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone). #[serde(default, skip_serializing_if = "Option::is_none")] pub timezone: Option, - /// A value that indicates whether the DB instance class of the DB instance uses its default processor features. - /// This setting doesn't apply to RDS Custom. + /// A value that indicates whether the DB instance class of the DB instance uses + /// its default processor features. + /// + /// + /// This setting doesn't apply to RDS Custom. #[serde(default, skip_serializing_if = "Option::is_none", rename = "useDefaultProcessorFeatures")] pub use_default_processor_features: Option, - /// A list of Amazon EC2 VPC security groups to associate with this DB instance. - /// Amazon Aurora - /// Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. - /// Default: The default EC2 VPC security group for the DB subnet group's VPC. + /// A list of Amazon EC2 VPC security groups to associate with this DB instance. + /// + /// + /// Amazon Aurora + /// + /// + /// Not applicable. The associated list of EC2 VPC security groups is managed + /// by the DB cluster. + /// + /// + /// Default: The default EC2 VPC security group for the DB subnet group's VPC. 
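Aside (not part of the generated patch): the storageType comment earlier in this hunk couples io1/gp3 to the Iops parameter, but the generated spec keeps both as independent Options. A minimal sketch of that documented rule, using hypothetical stand-in fields rather than the crate's DBInstanceSpec:

/// Hypothetical stand-ins for the two spec fields involved (names assumed).
struct StorageSettings {
    storage_type: Option<String>,
    iops: Option<i64>,
}

/// "If you specify io1 or gp3, you must also include a value for the Iops parameter."
fn check_storage(s: &StorageSettings) -> Result<(), String> {
    match s.storage_type.as_deref() {
        Some("io1") | Some("gp3") if s.iops.is_none() => {
            Err("storageType io1/gp3 requires iops to be set".into())
        }
        _ => Ok(()),
    }
}

fn main() {
    // gp3 without iops violates the documented constraint.
    assert!(check_storage(&StorageSettings { storage_type: Some("gp3".into()), iops: None }).is_err());
    assert!(check_storage(&StorageSettings { storage_type: Some("gp3".into()), iops: Some(3000) }).is_ok());
    // gp2 has no such requirement.
    assert!(check_storage(&StorageSettings { storage_type: Some("gp2".into()), iops: None }).is_ok());
}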
#[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSecurityGroupIDs")] pub vpc_security_group_i_ds: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSecurityGroupRefs")] pub vpc_security_group_refs: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceDbParameterGroupRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceDbParameterGroupRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceDbSubnetGroupRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceDbSubnetGroupRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceKmsKeyRef { - /// AWSResourceReference provides all the values necessary to 
reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceKmsKeyRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// The password for the master user. The password can include any printable ASCII character except "/", """, or "@". -/// Amazon Aurora -/// Not applicable. The password for the master user is managed by the DB cluster. -/// Constraints: Can't be specified if ManageMasterUserPassword is turned on. -/// MariaDB -/// Constraints: Must contain from 8 to 41 characters. -/// Microsoft SQL Server -/// Constraints: Must contain from 8 to 128 characters. -/// MySQL -/// Constraints: Must contain from 8 to 41 characters. -/// Oracle -/// Constraints: Must contain from 8 to 30 characters. -/// PostgreSQL -/// Constraints: Must contain from 8 to 128 characters. +/// The password for the master user. The password can include any printable +/// ASCII character except "/", """, or "@". +/// +/// +/// Amazon Aurora +/// +/// +/// Not applicable. The password for the master user is managed by the DB cluster. +/// +/// +/// Constraints: Can't be specified if ManageMasterUserPassword is turned on. +/// +/// +/// MariaDB +/// +/// +/// Constraints: Must contain from 8 to 41 characters. +/// +/// +/// Microsoft SQL Server +/// +/// +/// Constraints: Must contain from 8 to 128 characters. +/// +/// +/// MySQL +/// +/// +/// Constraints: Must contain from 8 to 41 characters. +/// +/// +/// Oracle +/// +/// +/// Constraints: Must contain from 8 to 30 characters. +/// +/// +/// PostgreSQL +/// +/// +/// Constraints: Must contain from 8 to 128 characters. 
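Aside (not part of the patch): the master-user-password comment above lists per-engine length ranges and a small set of excluded characters. A rough sketch of that check, interpreting "printable ASCII" as 0x20–0x7E and using the MariaDB/MySQL range as the example:

/// Sketch of the documented rules: printable ASCII only (interpreted here as
/// 0x20–0x7E), excluding '/', '"' and '@', within an engine-specific length
/// range (8–41 for MariaDB/MySQL, 8–30 for Oracle, 8–128 for SQL Server/PostgreSQL).
fn master_password_ok(password: &str, min_len: usize, max_len: usize) -> bool {
    let length_ok = (min_len..=max_len).contains(&password.len());
    let charset_ok = password
        .chars()
        .all(|c| (c.is_ascii_graphic() || c == ' ') && !matches!(c, '/' | '"' | '@'));
    length_ok && charset_ok
}

fn main() {
    assert!(master_password_ok("s3cure-Example!", 8, 41)); // MariaDB/MySQL range
    assert!(!master_password_ok("bad/slash-pass", 8, 41)); // '/' is excluded
    assert!(!master_password_ok("short", 8, 41));          // below the minimum length
}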
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceMasterUserPassword { /// Key is the key within the secret @@ -657,40 +1740,92 @@ pub struct DBInstanceMasterUserPassword { pub namespace: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceMasterUserSecretKmsKeyRef { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceMasterUserSecretKmsKeyRefFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// Contains the processor features of a DB instance class. -/// To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter. -/// You can set the processor features of the DB instance class for a DB instance when you call one of the following actions: -/// * CreateDBInstance -/// * ModifyDBInstance -/// * RestoreDBInstanceFromDBSnapshot -/// * RestoreDBInstanceFromS3 -/// * RestoreDBInstanceToPointInTime -/// You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter. -/// In addition, you can use the following actions for DB instance class processor information: -/// * DescribeDBInstances -/// * DescribeDBSnapshots -/// * DescribeValidDBInstanceModifications -/// If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met: -/// * You are accessing an Oracle DB instance. -/// * Your Oracle DB instance class supports configuring the number of CPU cores and threads per core. -/// * The current number CPU cores and threads is set to a non-default value. -/// For more information, see Configuring the Processor of the DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) in the Amazon RDS User Guide. +/// Contains the processor features of a DB instance class. +/// +/// +/// To specify the number of CPU cores, use the coreCount feature name for the +/// Name parameter. To specify the number of threads per core, use the threadsPerCore +/// feature name for the Name parameter. 
+/// +/// +/// You can set the processor features of the DB instance class for a DB instance +/// when you call one of the following actions: +/// +/// +/// * CreateDBInstance +/// +/// +/// * ModifyDBInstance +/// +/// +/// * RestoreDBInstanceFromDBSnapshot +/// +/// +/// * RestoreDBInstanceFromS3 +/// +/// +/// * RestoreDBInstanceToPointInTime +/// +/// +/// You can view the valid processor values for a particular instance class by +/// calling the DescribeOrderableDBInstanceOptions action and specifying the +/// instance class for the DBInstanceClass parameter. +/// +/// +/// In addition, you can use the following actions for DB instance class processor +/// information: +/// +/// +/// * DescribeDBInstances +/// +/// +/// * DescribeDBSnapshots +/// +/// +/// * DescribeValidDBInstanceModifications +/// +/// +/// If you call DescribeDBInstances, ProcessorFeature returns non-null values +/// only if the following conditions are met: +/// +/// +/// * You are accessing an Oracle DB instance. +/// +/// +/// * Your Oracle DB instance class supports configuring the number of CPU +/// cores and threads per core. +/// +/// +/// * The current number CPU cores and threads is set to a non-default value. +/// +/// +/// For more information, see Configuring the Processor of the DB Instance Class +/// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceProcessorFeatures { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -699,8 +1834,11 @@ pub struct DBInstanceProcessorFeatures { pub value: Option, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. 
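Aside (not part of the patch): the ProcessorFeature comment above names coreCount and threadsPerCore as the supported feature names. A small sketch of assembling such name/value pairs, with a stand-in struct mirroring the optional name/value fields of the generated DBInstanceProcessorFeatures type:

/// Stand-in for the generated name/value pair (both fields are Options there too).
#[derive(Debug)]
struct ProcessorFeature {
    name: Option<String>,
    value: Option<String>,
}

/// Builds the two documented features: coreCount and threadsPerCore.
fn processor_features(core_count: u32, threads_per_core: u32) -> Vec<ProcessorFeature> {
    vec![
        ProcessorFeature { name: Some("coreCount".into()), value: Some(core_count.to_string()) },
        ProcessorFeature { name: Some("threadsPerCore".into()), value: Some(threads_per_core.to_string()) },
    ]
}

fn main() {
    // e.g. 2 cores with one thread per core (hyperthreading disabled).
    for feature in processor_features(2, 1) {
        println!("{:?}", feature);
    }
}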
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -709,16 +1847,24 @@ pub struct DBInstanceTags { pub value: Option, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceVpcSecurityGroupRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceVpcSecurityGroupRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -728,19 +1874,27 @@ pub struct DBInstanceVpcSecurityGroupRefsFrom { /// DBInstanceStatus defines the observed state of DBInstance #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// Indicates whether engine-native audit fields are included in the database activity stream. + /// Indicates whether engine-native audit fields are included in the database + /// activity stream. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamEngineNativeAuditFieldsIncluded")] pub activity_stream_engine_native_audit_fields_included: Option, - /// The Amazon Web Services KMS key identifier used for encrypting messages in the database activity stream. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. + /// The Amazon Web Services KMS key identifier used for encrypting messages in + /// the database activity stream. The Amazon Web Services KMS key identifier + /// is the key ARN, key ID, alias ARN, or alias name for the KMS key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamKMSKeyID")] pub activity_stream_kms_key_id: Option, - /// The name of the Amazon Kinesis data stream used for the database activity stream. + /// The name of the Amazon Kinesis data stream used for the database activity + /// stream. 
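Aside (not part of the patch): the AWSResourceReferenceWrapper comments above describe the `from:` / `name:` reference syntax. A minimal serde sketch with stand-in structs (not the crate's exact items; assumes serde and serde_json as dependencies) showing the shape that syntax produces:

use serde::{Deserialize, Serialize};

/// Stand-in for one of the *Ref wrapper types above.
#[derive(Serialize, Deserialize, Debug, Default)]
struct ResourceRef {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    from: Option<RefFrom>,
}

/// Stand-in for the corresponding *RefFrom type.
#[derive(Serialize, Deserialize, Debug, Default)]
struct RefFrom {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    name: Option<String>,
}

fn main() {
    // References another cluster-managed resource by name rather than by ID/ARN.
    let reference = ResourceRef {
        from: Some(RefFrom { name: Some("my-db-subnet-group".into()) }),
    };
    // Prints {"from":{"name":"my-db-subnet-group"}} — the JSON equivalent of the
    // `from:` / `name:` YAML shown in the wrapper's doc comment.
    println!("{}", serde_json::to_string(&reference).unwrap());
}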
#[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamKinesisStreamName")] pub activity_stream_kinesis_stream_name: Option, - /// The mode of the database activity stream. Database events such as a change or access generate an activity stream event. RDS for Oracle always handles these events asynchronously. + /// The mode of the database activity stream. Database events such as a change + /// or access generate an activity stream event. RDS for Oracle always handles + /// these events asynchronously. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamMode")] pub activity_stream_mode: Option, /// The status of the policy state of the activity stream. @@ -749,133 +1903,213 @@ pub struct DBInstanceStatus { /// The status of the database activity stream. #[serde(default, skip_serializing_if = "Option::is_none", rename = "activityStreamStatus")] pub activity_stream_status: Option, - /// The Amazon Web Services Identity and Access Management (IAM) roles associated with the DB instance. + /// The Amazon Web Services Identity and Access Management (IAM) roles associated + /// with the DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "associatedRoles")] pub associated_roles: Option>, /// The time when a stopped DB instance is restarted automatically. #[serde(default, skip_serializing_if = "Option::is_none", rename = "automaticRestartTime")] pub automatic_restart_time: Option, - /// The automation mode of the RDS Custom DB instance: full or all paused. If full, the DB instance automates monitoring and instance recovery. If all paused, the instance pauses automation for the duration set by --resume-full-automation-mode-minutes. + /// The automation mode of the RDS Custom DB instance: full or all paused. If + /// full, the DB instance automates monitoring and instance recovery. If all + /// paused, the instance pauses automation for the duration set by --resume-full-automation-mode-minutes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "automationMode")] pub automation_mode: Option, - /// The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup. + /// The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services + /// Backup. #[serde(default, skip_serializing_if = "Option::is_none", rename = "awsBackupRecoveryPointARN")] pub aws_backup_recovery_point_arn: Option, /// The details of the DB instance's server certificate. #[serde(default, skip_serializing_if = "Option::is_none", rename = "certificateDetails")] pub certificate_details: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance. - /// A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. 
For some use cases, a CoIP can provide lower latency for connections to the DB instance from outside of its virtual private cloud (VPC) on your local network. - /// For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. - /// For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) in the Amazon Web Services Outposts User Guide. + /// Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS + /// on Outposts DB instance. + /// + /// + /// A CoIP provides local or external connectivity to resources in your Outpost + /// subnets through your on-premises network. For some use cases, a CoIP can + /// provide lower latency for connections to the DB instance from outside of + /// its virtual private cloud (VPC) on your local network. + /// + /// + /// For more information about RDS on Outposts, see Working with Amazon RDS on + /// Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + /// in the Amazon RDS User Guide. + /// + /// + /// For more information about CoIPs, see Customer-owned IP addresses (https://docs.aws.amazon.com/outposts/latest/userguide/routing.html#ip-addressing) + /// in the Amazon Web Services Outposts User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customerOwnedIPEnabled")] pub customer_owned_ip_enabled: Option, /// The list of replicated automated backups associated with the DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbInstanceAutomatedBackupsReplications")] pub db_instance_automated_backups_replications: Option>, - /// Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port. + /// Specifies the port that the DB instance listens on. If the DB instance is + /// part of a DB cluster, this can be a different port than the DB cluster port. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbInstancePort")] pub db_instance_port: Option, - /// Specifies the current state of this database. - /// For information about DB instance statuses, see Viewing DB instance status (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) in the Amazon RDS User Guide. + /// Specifies the current state of this database. + /// + /// + /// For information about DB instance statuses, see Viewing DB instance status + /// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/accessing-monitoring.html#Overview.DBInstance.Status) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbInstanceStatus")] pub db_instance_status: Option, /// Provides the list of DB parameter groups applied to this DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbParameterGroups")] pub db_parameter_groups: Option>, - /// Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group. + /// Specifies information on the subnet group associated with the DB instance, + /// including the name, description, and subnets in the subnet group. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroup")] pub db_subnet_group: Option, - /// The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle SID is also the name of the CDB. This setting is valid for RDS Custom only. + /// The Oracle system ID (Oracle SID) for a container database (CDB). The Oracle + /// SID is also the name of the CDB. This setting is valid for RDS Custom only. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSystemID")] pub db_system_id: Option, - /// The Amazon Web Services Region-unique, immutable identifier for the DB instance. This identifier is found in Amazon Web Services CloudTrail log entries whenever the Amazon Web Services KMS key for the DB instance is accessed. + /// The Amazon Web Services Region-unique, immutable identifier for the DB instance. + /// This identifier is found in Amazon Web Services CloudTrail log entries whenever + /// the Amazon Web Services KMS key for the DB instance is accessed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbiResourceID")] pub dbi_resource_id: Option, /// The Active Directory Domain membership records associated with the DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainMemberships")] pub domain_memberships: Option>, - /// A list of log types that this DB instance is configured to export to CloudWatch Logs. - /// Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) in the Amazon RDS User Guide. + /// A list of log types that this DB instance is configured to export to CloudWatch + /// Logs. + /// + /// + /// Log types vary by DB engine. For information about the log types for each + /// DB engine, see Amazon RDS Database Log Files (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enabledCloudwatchLogsExports")] pub enabled_cloudwatch_logs_exports: Option>, - /// Specifies the connection endpoint. - /// The endpoint might not be shown for instances whose status is creating. + /// Specifies the connection endpoint. + /// + /// + /// The endpoint might not be shown for instances whose status is creating. #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option, - /// The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance. + /// The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that + /// receives the Enhanced Monitoring metrics data for the DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enhancedMonitoringResourceARN")] pub enhanced_monitoring_resource_arn: Option, - /// True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false. - /// IAM database authentication can be enabled for the following database engines - /// * For MySQL 5.6, minor version 5.6.34 or higher - /// * For MySQL 5.7, minor version 5.7.16 or higher - /// * Aurora 5.6 or higher. To enable IAM database authentication for Aurora, see DBCluster Type. + /// True if mapping of Amazon Web Services Identity and Access Management (IAM) + /// accounts to database accounts is enabled, and otherwise false. 
+ /// + /// + /// IAM database authentication can be enabled for the following database engines + /// + /// + /// * For MySQL 5.6, minor version 5.6.34 or higher + /// + /// + /// * For MySQL 5.7, minor version 5.7.16 or higher + /// + /// + /// * Aurora 5.6 or higher. To enable IAM database authentication for Aurora, + /// see DBCluster Type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "iamDatabaseAuthenticationEnabled")] pub iam_database_authentication_enabled: Option, /// Provides the date and time the DB instance was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCreateTime")] pub instance_create_time: Option, - /// Specifies the latest time to which a database can be restored with point-in-time restore. + /// Specifies the latest time to which a database can be restored with point-in-time + /// restore. #[serde(default, skip_serializing_if = "Option::is_none", rename = "latestRestorableTime")] pub latest_restorable_time: Option, /// Specifies the listener connection endpoint for SQL Server Always On. #[serde(default, skip_serializing_if = "Option::is_none", rename = "listenerEndpoint")] pub listener_endpoint: Option, - /// Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. - /// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide. + /// Contains the secret managed by RDS in Amazon Web Services Secrets Manager + /// for the master user password. + /// + /// + /// For more information, see Password management with Amazon Web Services Secrets + /// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "masterUserSecret")] pub master_user_secret: Option, /// Provides the list of option group memberships for this DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "optionGroupMemberships")] pub option_group_memberships: Option>, - /// A value that specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements. + /// A value that specifies that changes to the DB instance are pending. This + /// element is only included when changes are pending. Specific changes are identified + /// by subelements. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingModifiedValues")] pub pending_modified_values: Option, - /// Contains one or more identifiers of Aurora DB clusters to which the RDS DB instance is replicated as a read replica. For example, when you create an Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB cluster for the Aurora read replica is shown. This output doesn't contain information about cross-Region Aurora read replicas. - /// Currently, each RDS DB instance can have only one Aurora read replica. + /// Contains one or more identifiers of Aurora DB clusters to which the RDS DB + /// instance is replicated as a read replica. For example, when you create an + /// Aurora read replica of an RDS for MySQL DB instance, the Aurora MySQL DB + /// cluster for the Aurora read replica is shown. This output doesn't contain + /// information about cross-Region Aurora read replicas. 
+ /// + /// + /// Currently, each RDS DB instance can have only one Aurora read replica. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readReplicaDBClusterIdentifiers")] pub read_replica_db_cluster_identifiers: Option>, - /// Contains one or more identifiers of the read replicas associated with this DB instance. + /// Contains one or more identifiers of the read replicas associated with this + /// DB instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readReplicaDBInstanceIdentifiers")] pub read_replica_db_instance_identifiers: Option>, - /// Contains the identifier of the source DB cluster if this DB instance is a read replica. + /// Contains the identifier of the source DB cluster if this DB instance is a + /// read replica. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readReplicaSourceDBClusterIdentifier")] pub read_replica_source_db_cluster_identifier: Option, - /// Contains the identifier of the source DB instance if this DB instance is a read replica. + /// Contains the identifier of the source DB instance if this DB instance is + /// a read replica. #[serde(default, skip_serializing_if = "Option::is_none", rename = "readReplicaSourceDBInstanceIdentifier")] pub read_replica_source_db_instance_identifier: Option, - /// The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60 (default). The maximum value is 1,440. + /// The number of minutes to pause the automation. When the time period ends, + /// RDS Custom resumes full automation. The minimum value is 60 (default). The + /// maximum value is 1,440. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resumeFullAutomationModeTime")] pub resume_full_automation_mode_time: Option, - /// If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support. + /// If present, specifies the name of the secondary Availability Zone for a DB + /// instance with multi-AZ support. #[serde(default, skip_serializing_if = "Option::is_none", rename = "secondaryAvailabilityZone")] pub secondary_availability_zone: Option, - /// The status of a read replica. If the instance isn't a read replica, this is blank. + /// The status of a read replica. If the instance isn't a read replica, this + /// is blank. #[serde(default, skip_serializing_if = "Option::is_none", rename = "statusInfos")] pub status_infos: Option>, - /// Provides a list of VPC security group elements that the DB instance belongs to. + /// Provides a list of VPC security group elements that the DB instance belongs + /// to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSecurityGroups")] pub vpc_security_groups: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Describes an Amazon Web Services Identity and Access Management (IAM) role that is associated with a DB instance. +/// Describes an Amazon Web Services Identity and Access Management (IAM) role +/// that is associated with a DB instance. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusAssociatedRoles { #[serde(default, skip_serializing_if = "Option::is_none", rename = "featureName")] @@ -895,7 +2129,9 @@ pub struct DBInstanceStatusCertificateDetails { pub valid_till: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusConditions { /// Last time the condition transitioned from one status to another. @@ -914,21 +2150,37 @@ pub struct DBInstanceStatusConditions { pub r#type: String, } -/// Automated backups of a DB instance replicated to another Amazon Web Services Region. They consist of system backups, transaction logs, and database instance properties. +/// Automated backups of a DB instance replicated to another Amazon Web Services +/// Region. They consist of system backups, transaction logs, and database instance +/// properties. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbInstanceAutomatedBackupsReplications { #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbInstanceAutomatedBackupsARN")] pub db_instance_automated_backups_arn: Option, } -/// The status of the DB parameter group. 
-/// This data type is used as a response element in the following actions: -/// * CreateDBInstance -/// * CreateDBInstanceReadReplica -/// * DeleteDBInstance -/// * ModifyDBInstance -/// * RebootDBInstance -/// * RestoreDBInstanceFromDBSnapshot +/// The status of the DB parameter group. +/// +/// +/// This data type is used as a response element in the following actions: +/// +/// +/// * CreateDBInstance +/// +/// +/// * CreateDBInstanceReadReplica +/// +/// +/// * DeleteDBInstance +/// +/// +/// * ModifyDBInstance +/// +/// +/// * RebootDBInstance +/// +/// +/// * RestoreDBInstanceFromDBSnapshot #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbParameterGroups { #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbParameterGroupName")] @@ -937,7 +2189,8 @@ pub struct DBInstanceStatusDbParameterGroups { pub parameter_apply_status: Option, } -/// Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group. +/// Specifies information on the subnet group associated with the DB instance, +/// including the name, description, and subnets in the subnet group. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbSubnetGroup { #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbSubnetGroupARN")] @@ -956,40 +2209,56 @@ pub struct DBInstanceStatusDbSubnetGroup { pub vpc_id: Option, } -/// This data type is used as a response element for the DescribeDBSubnetGroups operation. +/// This data type is used as a response element for the DescribeDBSubnetGroups +/// operation. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbSubnetGroupSubnets { - /// Contains Availability Zone information. - /// This data type is used as an element in the OrderableDBInstanceOption data type. + /// Contains Availability Zone information. + /// + /// + /// This data type is used as an element in the OrderableDBInstanceOption data + /// type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetAvailabilityZone")] pub subnet_availability_zone: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetIdentifier")] pub subnet_identifier: Option, - /// A data type that represents an Outpost. - /// For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. + /// A data type that represents an Outpost. + /// + /// + /// For more information about RDS on Outposts, see Amazon RDS on Amazon Web + /// Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetOutpost")] pub subnet_outpost: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetStatus")] pub subnet_status: Option, } -/// Contains Availability Zone information. -/// This data type is used as an element in the OrderableDBInstanceOption data type. +/// Contains Availability Zone information. +/// +/// +/// This data type is used as an element in the OrderableDBInstanceOption data +/// type. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbSubnetGroupSubnetsSubnetAvailabilityZone { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// A data type that represents an Outpost. -/// For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. +/// A data type that represents an Outpost. +/// +/// +/// For more information about RDS on Outposts, see Amazon RDS on Amazon Web +/// Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDbSubnetGroupSubnetsSubnetOutpost { #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, } -/// An Active Directory Domain membership record associated with the DB instance or cluster. +/// An Active Directory Domain membership record associated with the DB instance +/// or cluster. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusDomainMemberships { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1002,8 +2271,10 @@ pub struct DBInstanceStatusDomainMemberships { pub status: Option, } -/// Specifies the connection endpoint. -/// The endpoint might not be shown for instances whose status is creating. +/// Specifies the connection endpoint. +/// +/// +/// The endpoint might not be shown for instances whose status is creating. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusEndpoint { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1025,8 +2296,13 @@ pub struct DBInstanceStatusListenerEndpoint { pub port: Option, } -/// Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password. -/// For more information, see Password management with Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the Amazon RDS User Guide. +/// Contains the secret managed by RDS in Amazon Web Services Secrets Manager +/// for the master user password. +/// +/// +/// For more information, see Password management with Amazon Web Services Secrets +/// Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusMasterUserSecret { #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] @@ -1046,7 +2322,9 @@ pub struct DBInstanceStatusOptionGroupMemberships { pub status: Option, } -/// A value that specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements. +/// A value that specifies that changes to the DB instance are pending. This +/// element is only included when changes are pending. Specific changes are identified +/// by subelements. 
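Aside (not part of the patch): per the pendingModifiedValues comment above, the element is present only while changes are pending, so its presence doubles as a "changes pending" check. A hedged consumer sketch; the DBInstance import path is assumed by analogy with the dbparametergroups module later in this diff, and kube/tokio are assumed as dependencies:

use kube::{Api, Client};
// Path assumed to mirror rds_services_k8s_aws/v1alpha1/dbparametergroups below.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbinstances::DBInstance;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::try_default().await?;
    let dbs: Api<DBInstance> = Api::namespaced(client, "default");
    let db = dbs.get("example-db").await?;

    if let Some(status) = db.status {
        // Present only while modifications are pending, per the doc comment above.
        if status.pending_modified_values.is_some() {
            println!("instance has pending modifications");
        }
        println!("state: {:?}", status.db_instance_status);
    }
    Ok(())
}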
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusPendingModifiedValues { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allocatedStorage")] @@ -1075,7 +2353,8 @@ pub struct DBInstanceStatusPendingModifiedValues { pub master_user_password: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "multiAZ")] pub multi_az: Option, - /// A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated. + /// A list of the log types whose configuration is still pending. In other words, + /// these log types are in the process of being activated or deactivated. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingCloudwatchLogsExports")] pub pending_cloudwatch_logs_exports: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1090,7 +2369,8 @@ pub struct DBInstanceStatusPendingModifiedValues { pub storage_type: Option, } -/// A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated. +/// A list of the log types whose configuration is still pending. In other words, +/// these log types are in the process of being activated or deactivated. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusPendingModifiedValuesPendingCloudwatchLogsExports { #[serde(default, skip_serializing_if = "Option::is_none", rename = "logTypesToDisable")] @@ -1099,24 +2379,68 @@ pub struct DBInstanceStatusPendingModifiedValuesPendingCloudwatchLogsExports { pub log_types_to_enable: Option>, } -/// Contains the processor features of a DB instance class. -/// To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter. -/// You can set the processor features of the DB instance class for a DB instance when you call one of the following actions: -/// * CreateDBInstance -/// * ModifyDBInstance -/// * RestoreDBInstanceFromDBSnapshot -/// * RestoreDBInstanceFromS3 -/// * RestoreDBInstanceToPointInTime -/// You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter. -/// In addition, you can use the following actions for DB instance class processor information: -/// * DescribeDBInstances -/// * DescribeDBSnapshots -/// * DescribeValidDBInstanceModifications -/// If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met: -/// * You are accessing an Oracle DB instance. -/// * Your Oracle DB instance class supports configuring the number of CPU cores and threads per core. -/// * The current number CPU cores and threads is set to a non-default value. -/// For more information, see Configuring the Processor of the DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) in the Amazon RDS User Guide. +/// Contains the processor features of a DB instance class. +/// +/// +/// To specify the number of CPU cores, use the coreCount feature name for the +/// Name parameter. To specify the number of threads per core, use the threadsPerCore +/// feature name for the Name parameter. 
+/// +/// +/// You can set the processor features of the DB instance class for a DB instance +/// when you call one of the following actions: +/// +/// +/// * CreateDBInstance +/// +/// +/// * ModifyDBInstance +/// +/// +/// * RestoreDBInstanceFromDBSnapshot +/// +/// +/// * RestoreDBInstanceFromS3 +/// +/// +/// * RestoreDBInstanceToPointInTime +/// +/// +/// You can view the valid processor values for a particular instance class by +/// calling the DescribeOrderableDBInstanceOptions action and specifying the +/// instance class for the DBInstanceClass parameter. +/// +/// +/// In addition, you can use the following actions for DB instance class processor +/// information: +/// +/// +/// * DescribeDBInstances +/// +/// +/// * DescribeDBSnapshots +/// +/// +/// * DescribeValidDBInstanceModifications +/// +/// +/// If you call DescribeDBInstances, ProcessorFeature returns non-null values +/// only if the following conditions are met: +/// +/// +/// * You are accessing an Oracle DB instance. +/// +/// +/// * Your Oracle DB instance class supports configuring the number of CPU +/// cores and threads per core. +/// +/// +/// * The current number CPU cores and threads is set to a non-default value. +/// +/// +/// For more information, see Configuring the Processor of the DB Instance Class +/// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusPendingModifiedValuesProcessorFeatures { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1138,7 +2462,8 @@ pub struct DBInstanceStatusStatusInfos { pub status_type: Option, } -/// This data type is used as a response element for queries on VPC security group membership. +/// This data type is used as a response element for queries on VPC security +/// group membership. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBInstanceStatusVpcSecurityGroups { #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbparametergroups.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbparametergroups.rs index 4eacb2917..1e68f99bf 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbparametergroups.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbparametergroups.rs @@ -6,9 +6,14 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// DBParameterGroupSpec defines the desired state of DBParameterGroup. -/// Contains the details of an Amazon RDS DB parameter group. -/// This data type is used as a response element in the DescribeDBParameterGroups action. +/// DBParameterGroupSpec defines the desired state of DBParameterGroup. +/// +/// +/// Contains the details of an Amazon RDS DB parameter group. +/// +/// +/// This data type is used as a response element in the DescribeDBParameterGroups +/// action. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBParameterGroup", plural = "dbparametergroups")] #[kube(namespaced)] @@ -17,34 +22,91 @@ use std::collections::BTreeMap; pub struct DBParameterGroupSpec { /// The description for the DB parameter group. pub description: String, - /// The DB parameter group family name. 
A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family. - /// To list all of the available parameter group families for a DB engine, use the following command: - /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" --engine - /// For example, to list all of the available parameter group families for the MySQL DB engine, use the following command: - /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" --engine mysql - /// The output contains duplicates. - /// The following are the valid DB engine values: - /// * aurora (for MySQL 5.6-compatible Aurora) - /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) - /// * aurora-postgresql - /// * mariadb - /// * mysql - /// * oracle-ee - /// * oracle-ee-cdb - /// * oracle-se2 - /// * oracle-se2-cdb - /// * postgres - /// * sqlserver-ee - /// * sqlserver-se - /// * sqlserver-ex - /// * sqlserver-web + /// The DB parameter group family name. A DB parameter group can be associated + /// with one and only one DB parameter group family, and can be applied only + /// to a DB instance running a database engine and engine version compatible + /// with that DB parameter group family. + /// + /// + /// To list all of the available parameter group families for a DB engine, use + /// the following command: + /// + /// + /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" + /// --engine + /// + /// + /// For example, to list all of the available parameter group families for the + /// MySQL DB engine, use the following command: + /// + /// + /// aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily" + /// --engine mysql + /// + /// + /// The output contains duplicates. + /// + /// + /// The following are the valid DB engine values: + /// + /// + /// * aurora (for MySQL 5.6-compatible Aurora) + /// + /// + /// * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) + /// + /// + /// * aurora-postgresql + /// + /// + /// * mariadb + /// + /// + /// * mysql + /// + /// + /// * oracle-ee + /// + /// + /// * oracle-ee-cdb + /// + /// + /// * oracle-se2 + /// + /// + /// * oracle-se2-cdb + /// + /// + /// * postgres + /// + /// + /// * sqlserver-ee + /// + /// + /// * sqlserver-se + /// + /// + /// * sqlserver-ex + /// + /// + /// * sqlserver-web pub family: String, - /// The name of the DB parameter group. - /// Constraints: - /// * Must be 1 to 255 letters, numbers, or hyphens. - /// * First character must be a letter - /// * Can't end with a hyphen or contain two consecutive hyphens - /// This value is stored as a lowercase string. + /// The name of the DB parameter group. + /// + /// + /// Constraints: + /// + /// + /// * Must be 1 to 255 letters, numbers, or hyphens. + /// + /// + /// * First character must be a letter + /// + /// + /// * Can't end with a hyphen or contain two consecutive hyphens + /// + /// + /// This value is stored as a lowercase string. pub name: String, #[serde(default, skip_serializing_if = "Option::is_none", rename = "parameterOverrides")] pub parameter_overrides: Option>, @@ -53,8 +115,11 @@ pub struct DBParameterGroupSpec { pub tags: Option>, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. 
-/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBParameterGroupTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -66,10 +131,15 @@ pub struct DBParameterGroupTags { /// DBParameterGroupStatus defines the observed state of DBParameterGroup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBParameterGroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A list of Parameter values. @@ -77,20 +147,32 @@ pub struct DBParameterGroupStatus { pub parameter_override_statuses: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBParameterGroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBParameterGroupStatusConditions { /// Last time the condition transitioned from one status to another. @@ -109,8 +191,12 @@ pub struct DBParameterGroupStatusConditions { pub r#type: String, } -/// This data type is used as a request parameter in the ModifyDBParameterGroup and ResetDBParameterGroup actions. -/// This data type is used as a response element in the DescribeEngineDefaultParameters and DescribeDBParameters actions. +/// This data type is used as a request parameter in the ModifyDBParameterGroup +/// and ResetDBParameterGroup actions. +/// +/// +/// This data type is used as a response element in the DescribeEngineDefaultParameters +/// and DescribeDBParameters actions. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBParameterGroupStatusParameterOverrideStatuses { #[serde(default, skip_serializing_if = "Option::is_none", rename = "allowedValues")] diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbproxies.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbproxies.rs index 9a8911d54..961ba39be 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbproxies.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbproxies.rs @@ -5,9 +5,13 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DBProxySpec defines the desired state of DBProxy. -/// The data structure representing a proxy managed by the RDS Proxy. -/// This data type is used as a response element in the DescribeDBProxies action. +/// DBProxySpec defines the desired state of DBProxy. +/// +/// +/// The data structure representing a proxy managed by the RDS Proxy. +/// +/// +/// This data type is used as a response element in the DescribeDBProxies action. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBProxy", plural = "dbproxies")] #[kube(namespaced)] @@ -16,24 +20,45 @@ use serde::{Serialize, Deserialize}; pub struct DBProxySpec { /// The authorization mechanism that the proxy uses. pub auth: Vec, - /// Whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. 
Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs. + /// Whether the proxy includes detailed information about SQL statements in its + /// logs. This information helps you to debug issues involving SQL behavior or + /// the performance and scalability of the proxy connections. The debug information + /// includes the text of SQL statements that you submit through the proxy. Thus, + /// only enable this setting when needed for debugging, and only when you have + /// security measures in place to safeguard any sensitive information that appears + /// in the logs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "debugLogging")] pub debug_logging: Option, - /// The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL. For RDS for Microsoft SQL Server, specify SQLSERVER. + /// The kinds of databases that the proxy can connect to. This value determines + /// which database network protocol the proxy recognizes when it interprets network + /// traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and + /// RDS for MySQL databases, specify MYSQL. For Aurora PostgreSQL and RDS for + /// PostgreSQL databases, specify POSTGRESQL. For RDS for Microsoft SQL Server, + /// specify SQLSERVER. #[serde(rename = "engineFamily")] pub engine_family: String, - /// The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database. + /// The number of seconds that a connection to the proxy can be inactive before + /// the proxy disconnects it. You can set this value higher or lower than the + /// connection timeout limit for the associated database. #[serde(default, skip_serializing_if = "Option::is_none", rename = "idleClientTimeout")] pub idle_client_timeout: Option, - /// The identifier for the proxy. This name must be unique for all proxies owned by your Amazon Web Services account in the specified Amazon Web Services Region. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens. + /// The identifier for the proxy. This name must be unique for all proxies owned + /// by your Amazon Web Services account in the specified Amazon Web Services + /// Region. An identifier must begin with a letter and must contain only ASCII + /// letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive + /// hyphens. pub name: String, - /// A Boolean parameter that specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. By enabling this setting, you can enforce encrypted TLS connections to the proxy. + /// A Boolean parameter that specifies whether Transport Layer Security (TLS) + /// encryption is required for connections to the proxy. By enabling this setting, + /// you can enforce encrypted TLS connections to the proxy. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "requireTLS")] pub require_tls: Option, - /// The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in Amazon Web Services Secrets Manager. + /// The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access + /// secrets in Amazon Web Services Secrets Manager. #[serde(rename = "roleARN")] pub role_arn: String, - /// An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy. + /// An optional set of key-value pairs to associate arbitrary data of your choosing + /// with the proxy. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// One or more VPC security group IDs to associate with the new proxy. @@ -44,7 +69,8 @@ pub struct DBProxySpec { pub vpc_subnet_i_ds: Vec, } -/// Specifies the details of authentication used by a proxy to log in as a specific database user. +/// Specifies the details of authentication used by a proxy to log in as a specific +/// database user. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBProxyAuth { #[serde(default, skip_serializing_if = "Option::is_none", rename = "authScheme")] @@ -61,8 +87,11 @@ pub struct DBProxyAuth { pub user_name: Option, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBProxyTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -74,19 +103,27 @@ pub struct DBProxyTags { /// DBProxyStatus defines the observed state of DBProxy #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBProxyStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date and time when the proxy was first created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "createdDate")] pub created_date: Option, - /// The endpoint that you can use to connect to the DB proxy. You include the endpoint value in the connection string for a database client application. 
+ /// The endpoint that you can use to connect to the DB proxy. You include the + /// endpoint value in the connection string for a database client application. #[serde(default, skip_serializing_if = "Option::is_none")] pub endpoint: Option, - /// The current status of this proxy. A status of available means the proxy is ready to handle requests. Other values indicate that you must wait for the proxy to be ready, or take some action to resolve an issue. + /// The current status of this proxy. A status of available means the proxy is + /// ready to handle requests. Other values indicate that you must wait for the + /// proxy to be ready, or take some action to resolve an issue. #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, /// The date and time when the proxy was last updated. @@ -97,20 +134,32 @@ pub struct DBProxyStatus { pub vpc_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBProxyStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBProxyStatusConditions { /// Last time the condition transitioned from one status to another. 
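The DBParameterGroup and DBProxy types in the hunks above are ordinary kube-derive custom resources, so they can be created from Rust like any other CRD. The sketch below is illustrative only and not part of the generated patch: the module path is inferred from the file layout shown in this diff, a tokio runtime is assumed, and the description/family/name values are placeholders.

use kube::{Api, Client, api::PostParams};
// Module path inferred from the file layout shown in this patch.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbparametergroups::{
    DBParameterGroup, DBParameterGroupSpec,
};

#[tokio::main] // assumes a tokio runtime is available
async fn main() -> Result<(), kube::Error> {
    let client = Client::try_default().await?;

    // The required fields are plain Strings; optional members keep their defaults.
    let spec = DBParameterGroupSpec {
        description: "example parameter group".to_string(),
        family: "postgres15".to_string(), // placeholder parameter group family
        name: "example-pg".to_string(),
        ..Default::default()
    };

    // `DBParameterGroup::new` is generated by kube's CustomResource derive.
    let pg = DBParameterGroup::new("example-pg", spec);

    let api: Api<DBParameterGroup> = Api::namespaced(client, "default");
    api.create(&PostParams::default(), &pg).await?;
    Ok(())
}

The same spec serializes to the usual rds.services.k8s.aws/v1alpha1 manifest, so this is equivalent to applying the CR with kubectl; the Rust path only matters when a program consuming these bindings creates resources directly.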
diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbsubnetgroups.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbsubnetgroups.rs index a7e81fc69..ce51f6784 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbsubnetgroups.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/dbsubnetgroups.rs @@ -5,9 +5,14 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// DBSubnetGroupSpec defines the desired state of DBSubnetGroup. -/// Contains the details of an Amazon RDS DB subnet group. -/// This data type is used as a response element in the DescribeDBSubnetGroups action. +/// DBSubnetGroupSpec defines the desired state of DBSubnetGroup. +/// +/// +/// Contains the details of an Amazon RDS DB subnet group. +/// +/// +/// This data type is used as a response element in the DescribeDBSubnetGroups +/// action. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "DBSubnetGroup", plural = "dbsubnetgroups")] #[kube(namespaced)] @@ -16,12 +21,23 @@ use serde::{Serialize, Deserialize}; pub struct DBSubnetGroupSpec { /// The description for the DB subnet group. pub description: String, - /// The name for the DB subnet group. This value is stored as a lowercase string. - /// Constraints: - /// * Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. - /// * Must not be default. - /// * First character must be a letter. - /// Example: mydbsubnetgroup + /// The name for the DB subnet group. This value is stored as a lowercase string. + /// + /// + /// Constraints: + /// + /// + /// * Must contain no more than 255 letters, numbers, periods, underscores, + /// spaces, or hyphens. + /// + /// + /// * Must not be default. + /// + /// + /// * First character must be a letter. + /// + /// + /// Example: mydbsubnetgroup pub name: String, /// The EC2 Subnet IDs for the DB subnet group. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetIDs")] @@ -33,24 +49,35 @@ pub struct DBSubnetGroupSpec { pub tags: Option>, } -/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference type to provide more user friendly syntax for references using 'from' field Ex: APIIDRef: -/// from: name: my-api +/// AWSResourceReferenceWrapper provides a wrapper around *AWSResourceReference +/// type to provide more user friendly syntax for references using 'from' field +/// Ex: +/// APIIDRef: +/// +/// +/// from: +/// name: my-api #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupSubnetRefs { - /// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) + /// AWSResourceReference provides all the values necessary to reference another + /// k8s resource for finding the identifier(Id/ARN/Name) #[serde(default, skip_serializing_if = "Option::is_none")] pub from: Option, } -/// AWSResourceReference provides all the values necessary to reference another k8s resource for finding the identifier(Id/ARN/Name) +/// AWSResourceReference provides all the values necessary to reference another +/// k8s resource for finding the identifier(Id/ARN/Name) #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupSubnetRefsFrom { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. -/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the Amazon RDS User Guide. +/// Metadata assigned to an Amazon RDS resource consisting of a key-value pair. +/// +/// +/// For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) +/// in the Amazon RDS User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -62,10 +89,15 @@ pub struct DBSubnetGroupTags { /// DBSubnetGroupStatus defines the observed state of DBSubnetGroup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// Provides the status of the DB subnet group. 
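The AWSResourceReferenceWrapper pattern documented above (the `from:`/`name:` style in the doc comment) lets a DBSubnetGroup point at Subnet resources by name instead of hard-coding IDs. A minimal sketch, assuming the module path implied by this patch and that the `from` field wraps the `...From` struct shown above:

// Module path inferred from the file layout shown in this patch.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::dbsubnetgroups::{
    DBSubnetGroupSubnetRefs, DBSubnetGroupSubnetRefsFrom,
};

/// Builds a reference in the `from: name:` style illustrated in the doc
/// comment above, intended to be resolved by the ACK controller to the
/// referenced subnet's identifier at reconcile time.
fn subnet_ref_by_name(name: &str) -> DBSubnetGroupSubnetRefs {
    DBSubnetGroupSubnetRefs {
        from: Some(DBSubnetGroupSubnetRefsFrom {
            name: Some(name.to_string()),
        }),
    }
}

Referencing by name keeps manifests portable across accounts, since the actual subnet ID never appears in the CR.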
@@ -74,12 +106,24 @@ pub struct DBSubnetGroupStatus { /// Contains a list of Subnet elements. #[serde(default, skip_serializing_if = "Option::is_none")] pub subnets: Option>, - /// The network type of the DB subnet group. - /// Valid values: - /// * IPV4 - /// * DUAL - /// A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). - /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the Amazon RDS User Guide. + /// The network type of the DB subnet group. + /// + /// + /// Valid values: + /// + /// + /// * IPV4 + /// + /// + /// * DUAL + /// + /// + /// A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 + /// protocols (DUAL). + /// + /// + /// For more information, see Working with a DB instance in a VPC (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "supportedNetworkTypes")] pub supported_network_types: Option>, /// Provides the VpcId of the DB subnet group. @@ -87,20 +131,32 @@ pub struct DBSubnetGroupStatus { pub vpc_id: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatusConditions { /// Last time the condition transitioned from one status to another. @@ -119,33 +175,48 @@ pub struct DBSubnetGroupStatusConditions { pub r#type: String, } -/// This data type is used as a response element for the DescribeDBSubnetGroups operation. +/// This data type is used as a response element for the DescribeDBSubnetGroups +/// operation. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatusSubnets { - /// Contains Availability Zone information. - /// This data type is used as an element in the OrderableDBInstanceOption data type. + /// Contains Availability Zone information. + /// + /// + /// This data type is used as an element in the OrderableDBInstanceOption data + /// type. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetAvailabilityZone")] pub subnet_availability_zone: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetIdentifier")] pub subnet_identifier: Option, - /// A data type that represents an Outpost. - /// For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. + /// A data type that represents an Outpost. + /// + /// + /// For more information about RDS on Outposts, see Amazon RDS on Amazon Web + /// Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + /// in the Amazon RDS User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetOutpost")] pub subnet_outpost: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetStatus")] pub subnet_status: Option, } -/// Contains Availability Zone information. -/// This data type is used as an element in the OrderableDBInstanceOption data type. +/// Contains Availability Zone information. +/// +/// +/// This data type is used as an element in the OrderableDBInstanceOption data +/// type. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatusSubnetsSubnetAvailabilityZone { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, } -/// A data type that represents an Outpost. -/// For more information about RDS on Outposts, see Amazon RDS on Amazon Web Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the Amazon RDS User Guide. +/// A data type that represents an Outpost. +/// +/// +/// For more information about RDS on Outposts, see Amazon RDS on Amazon Web +/// Services Outposts (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) +/// in the Amazon RDS User Guide. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DBSubnetGroupStatusSubnetsSubnetOutpost { #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/globalclusters.rs b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/globalclusters.rs index 0423f4945..1a148ac51 100644 --- a/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/globalclusters.rs +++ b/kube-custom-resources-rs/src/rds_services_k8s_aws/v1alpha1/globalclusters.rs @@ -5,18 +5,23 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// GlobalClusterSpec defines the desired state of GlobalCluster. -/// A data type representing an Aurora global database. +/// GlobalClusterSpec defines the desired state of GlobalCluster. +/// +/// +/// A data type representing an Aurora global database. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "rds.services.k8s.aws", version = "v1alpha1", kind = "GlobalCluster", plural = "globalclusters")] #[kube(namespaced)] #[kube(status = "GlobalClusterStatus")] #[kube(schema = "disabled")] pub struct GlobalClusterSpec { - /// The name for your database of up to 64 alphanumeric characters. If you do not provide a name, Amazon Aurora will not create a database in the global database cluster you are creating. + /// The name for your database of up to 64 alphanumeric characters. If you do + /// not provide a name, Amazon Aurora will not create a database in the global + /// database cluster you are creating. #[serde(default, skip_serializing_if = "Option::is_none", rename = "databaseName")] pub database_name: Option, - /// The deletion protection setting for the new global database. The global database can't be deleted when deletion protection is enabled. + /// The deletion protection setting for the new global database. The global database + /// can't be deleted when deletion protection is enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deletionProtection")] pub deletion_protection: Option, /// The name of the database engine to be used for this DB cluster. @@ -28,7 +33,8 @@ pub struct GlobalClusterSpec { /// The cluster identifier of the new global database cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalClusterIdentifier")] pub global_cluster_identifier: Option, - /// The Amazon Resource Name (ARN) to use as the primary cluster of the global database. This parameter is optional. + /// The Amazon Resource Name (ARN) to use as the primary cluster of the global + /// database. This parameter is optional. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceDBClusterIdentifier")] pub source_db_cluster_identifier: Option, /// The storage encryption setting for the new global database cluster. 
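GlobalClusterSpec above consists almost entirely of optional fields, so a caller only fills in what it cares about. A hedged sketch, assuming the module path implied by this patch and the usual Option<String>/Option<bool> wrappers for the identifier and deletion-protection fields; values are placeholders:

// Module path inferred from the file layout shown in this patch.
use kube_custom_resources_rs::rds_services_k8s_aws::v1alpha1::globalclusters::{
    GlobalCluster, GlobalClusterSpec,
};

fn example_global_cluster() -> GlobalCluster {
    // Every field not set here keeps its Default.
    let spec = GlobalClusterSpec {
        global_cluster_identifier: Some("example-global".to_string()), // assumed Option<String>
        deletion_protection: Some(true),                               // assumed Option<bool>
        ..Default::default()
    };
    // `GlobalCluster::new` is generated by kube's CustomResource derive.
    GlobalCluster::new("example-global", spec)
}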
@@ -39,19 +45,30 @@ pub struct GlobalClusterSpec { /// GlobalClusterStatus defines the observed state of GlobalCluster #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalClusterStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster). + /// A data object containing all properties for the current state of an in-process + /// or pending failover process for this Aurora global database. This object + /// is empty unless the FailoverGlobalCluster API operation has been called on + /// this Aurora global database (GlobalCluster). #[serde(default, skip_serializing_if = "Option::is_none", rename = "failoverState")] pub failover_state: Option, /// The list of primary and secondary clusters within the global database cluster. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalClusterMembers")] pub global_cluster_members: Option>, - /// The Amazon Web Services Region-unique, immutable identifier for the global database cluster. This identifier is found in Amazon Web Services CloudTrail log entries whenever the Amazon Web Services KMS key for the DB cluster is accessed. + /// The Amazon Web Services Region-unique, immutable identifier for the global + /// database cluster. This identifier is found in Amazon Web Services CloudTrail + /// log entries whenever the Amazon Web Services KMS key for the DB cluster is + /// accessed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "globalClusterResourceID")] pub global_cluster_resource_id: Option, /// Specifies the current state of this global database cluster. @@ -59,20 +76,32 @@ pub struct GlobalClusterStatus { pub status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalClusterStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalClusterStatusConditions { /// Last time the condition transitioned from one status to another. @@ -91,7 +120,10 @@ pub struct GlobalClusterStatusConditions { pub r#type: String, } -/// A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster). +/// A data object containing all properties for the current state of an in-process +/// or pending failover process for this Aurora global database. This object +/// is empty unless the FailoverGlobalCluster API operation has been called on +/// this Aurora global database (GlobalCluster). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalClusterStatusFailoverState { #[serde(default, skip_serializing_if = "Option::is_none", rename = "fromDBClusterARN")] @@ -102,7 +134,8 @@ pub struct GlobalClusterStatusFailoverState { pub to_db_cluster_arn: Option, } -/// A data structure with information about any primary and secondary clusters associated with an Aurora global database. +/// A data structure with information about any primary and secondary clusters +/// associated with an Aurora global database. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct GlobalClusterStatusGlobalClusterMembers { #[serde(default, skip_serializing_if = "Option::is_none", rename = "dbClusterARN")] diff --git a/kube-custom-resources-rs/src/s3_services_k8s_aws/v1alpha1/buckets.rs b/kube-custom-resources-rs/src/s3_services_k8s_aws/v1alpha1/buckets.rs index 103fe098f..a5b471dd3 100644 --- a/kube-custom-resources-rs/src/s3_services_k8s_aws/v1alpha1/buckets.rs +++ b/kube-custom-resources-rs/src/s3_services_k8s_aws/v1alpha1/buckets.rs @@ -5,8 +5,12 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// BucketSpec defines the desired state of Bucket. -/// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all Amazon Web Services accounts. +/// BucketSpec defines the desired state of Bucket. +/// +/// +/// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +/// is globally unique, and the namespace is shared by all Amazon Web Services +/// accounts. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "s3.services.k8s.aws", version = "v1alpha1", kind = "Bucket", plural = "buckets")] #[kube(namespaced)] @@ -21,7 +25,10 @@ pub struct BucketSpec { pub acl: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub analytics: Option>, - /// Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 User Guide. + /// Describes the cross-origin access configuration for objects in an Amazon + /// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + /// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + /// S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub cors: Option, /// The configuration information for the bucket. @@ -30,7 +37,8 @@ pub struct BucketSpec { /// Specifies the default server-side-encryption configuration. #[serde(default, skip_serializing_if = "Option::is_none")] pub encryption: Option, - /// Allows grantee the read, write, read ACP, and write ACP permissions on the bucket. + /// Allows grantee the read, write, read ACP, and write ACP permissions on the + /// bucket. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantFullControl")] pub grant_full_control: Option, /// Allows grantee to list the objects in the bucket. @@ -39,8 +47,11 @@ pub struct BucketSpec { /// Allows grantee to read the bucket ACL. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantReadACP")] pub grant_read_acp: Option, - /// Allows grantee to create new objects in the bucket. - /// For the bucket and object owners of existing objects, also allows deletions and overwrites of those objects. + /// Allows grantee to create new objects in the bucket. + /// + /// + /// For the bucket and object owners of existing objects, also allows deletions + /// and overwrites of those objects. #[serde(default, skip_serializing_if = "Option::is_none", rename = "grantWrite")] pub grant_write: Option, /// Allows grantee to write the ACL for the applicable bucket. @@ -60,7 +71,8 @@ pub struct BucketSpec { pub metrics: Option>, /// The name of the bucket to create. pub name: String, - /// A container for specifying the notification configuration of the bucket. 
If this element is empty, notifications are turned off for the bucket. + /// A container for specifying the notification configuration of the bucket. + /// If this element is empty, notifications are turned off for the bucket. #[serde(default, skip_serializing_if = "Option::is_none")] pub notification: Option, /// Specifies whether you want S3 Object Lock to be enabled for the new bucket. @@ -68,16 +80,22 @@ pub struct BucketSpec { pub object_lock_enabled_for_bucket: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectOwnership")] pub object_ownership: Option, - /// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket. + /// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + /// that you want to apply to this Amazon S3 bucket. #[serde(default, skip_serializing_if = "Option::is_none", rename = "ownershipControls")] pub ownership_controls: Option, /// The bucket policy as a JSON document. #[serde(default, skip_serializing_if = "Option::is_none")] pub policy: Option, - /// The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) in the Amazon S3 User Guide. + /// The PublicAccessBlock configuration that you want to apply to this Amazon + /// S3 bucket. You can enable the configuration options in any combination. For + /// more information about when Amazon S3 considers a bucket or object public, + /// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "publicAccessBlock")] pub public_access_block: Option, - /// A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB. + /// A container for replication rules. You can add up to 1,000 rules. The maximum + /// size of a replication configuration is 2 MB. #[serde(default, skip_serializing_if = "Option::is_none")] pub replication: Option, /// Container for Payer. @@ -101,23 +119,32 @@ pub struct BucketAccelerate { pub status: Option, } -/// Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket. +/// Specifies the configuration and any analyses for the analytics filter of +/// an Amazon S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketAnalytics { - /// The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis. + /// The filter used to describe a set of objects for analyses. A filter must + /// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + /// If no filter is provided, all objects will be considered in any analysis. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, - /// Specifies data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes for an Amazon S3 bucket. + /// Specifies data related to access patterns to be collected and made available + /// to analyze the tradeoffs between different storage classes for an Amazon + /// S3 bucket. #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClassAnalysis")] pub storage_class_analysis: Option, } -/// The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis. +/// The filter used to describe a set of objects for analyses. A filter must +/// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +/// If no filter is provided, all objects will be considered in any analysis. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketAnalyticsFilter { - /// A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates in any combination, and an object must match all of the predicates for the filter to apply. + /// A conjunction (logical AND) of predicates, which is used in evaluating a + /// metrics filter. The operator must have at least two predicates in any combination, + /// and an object must match all of the predicates for the filter to apply. #[serde(default, skip_serializing_if = "Option::is_none")] pub and: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -127,7 +154,9 @@ pub struct BucketAnalyticsFilter { pub tag: Option, } -/// A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates in any combination, and an object must match all of the predicates for the filter to apply. +/// A conjunction (logical AND) of predicates, which is used in evaluating a +/// metrics filter. The operator must have at least two predicates in any combination, +/// and an object must match all of the predicates for the filter to apply. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketAnalyticsFilterAnd { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -154,15 +183,19 @@ pub struct BucketAnalyticsFilterTag { pub value: Option, } -/// Specifies data related to access patterns to be collected and made available to analyze the tradeoffs between different storage classes for an Amazon S3 bucket. +/// Specifies data related to access patterns to be collected and made available +/// to analyze the tradeoffs between different storage classes for an Amazon +/// S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketAnalyticsStorageClassAnalysis { - /// Container for data related to the storage class analysis for an Amazon S3 bucket for export. + /// Container for data related to the storage class analysis for an Amazon S3 + /// bucket for export. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataExport")] pub data_export: Option, } -/// Container for data related to the storage class analysis for an Amazon S3 bucket for export. 
+/// Container for data related to the storage class analysis for an Amazon S3 +/// bucket for export. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketAnalyticsStorageClassAnalysisDataExport { /// Where to publish the analytics results. @@ -193,7 +226,10 @@ pub struct BucketAnalyticsStorageClassAnalysisDataExportDestinationS3BucketDesti pub prefix: Option, } -/// Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 User Guide. +/// Describes the cross-origin access configuration for objects in an Amazon +/// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +/// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +/// S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketCors { #[serde(default, skip_serializing_if = "Option::is_none", rename = "corsRules")] @@ -234,14 +270,30 @@ pub struct BucketEncryption { /// Specifies the default server-side encryption configuration. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketEncryptionRules { - /// Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) in the Amazon S3 API Reference. + /// Describes the default server-side encryption to apply to new objects in the + /// bucket. If a PUT Object request doesn't specify any server-side encryption, + /// this default encryption will be applied. If you don't specify a customer + /// managed key at configuration, Amazon S3 automatically creates an Amazon Web + /// Services KMS key in your Amazon Web Services account the first time that + /// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon + /// S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption + /// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) + /// in the Amazon S3 API Reference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "applyServerSideEncryptionByDefault")] pub apply_server_side_encryption_by_default: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "bucketKeyEnabled")] pub bucket_key_enabled: Option, } -/// Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. 
For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) in the Amazon S3 API Reference. +/// Describes the default server-side encryption to apply to new objects in the +/// bucket. If a PUT Object request doesn't specify any server-side encryption, +/// this default encryption will be applied. If you don't specify a customer +/// managed key at configuration, Amazon S3 automatically creates an Amazon Web +/// Services KMS key in your Amazon Web Services account the first time that +/// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon +/// S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption +/// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +/// in the Amazon S3 API Reference. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketEncryptionRulesApplyServerSideEncryptionByDefault { #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsMasterKeyID")] @@ -250,11 +302,16 @@ pub struct BucketEncryptionRulesApplyServerSideEncryptionByDefault { pub sse_algorithm: Option, } -/// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. -/// For information about the S3 Intelligent-Tiering storage class, see Storage class for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +/// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +/// +/// +/// For information about the S3 Intelligent-Tiering storage class, see Storage +/// class for automatically optimizing frequently and infrequently accessed objects +/// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketIntelligentTiering { - /// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to. + /// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration + /// applies to. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -265,10 +322,12 @@ pub struct BucketIntelligentTiering { pub tierings: Option>, } -/// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration applies to. +/// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +/// applies to. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketIntelligentTieringFilter { - /// A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies. + /// A container for specifying S3 Intelligent-Tiering filters. The filters determine + /// the subset of objects to which the rule applies. #[serde(default, skip_serializing_if = "Option::is_none")] pub and: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -278,7 +337,8 @@ pub struct BucketIntelligentTieringFilter { pub tag: Option, } -/// A container for specifying S3 Intelligent-Tiering filters. The filters determine the subset of objects to which the rule applies. +/// A container for specifying S3 Intelligent-Tiering filters. The filters determine +/// the subset of objects to which the rule applies. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketIntelligentTieringFilterAnd { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -305,7 +365,9 @@ pub struct BucketIntelligentTieringFilterTag { pub value: Option, } -/// The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. +/// The S3 Intelligent-Tiering storage class is designed to optimize storage +/// costs by automatically moving data to the most cost-effective storage access +/// tier, without additional operational overhead. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketIntelligentTieringTierings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessTier")] @@ -314,13 +376,16 @@ pub struct BucketIntelligentTieringTierings { pub days: Option, } -/// Specifies the inventory configuration for an Amazon S3 bucket. For more information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the Amazon S3 API Reference. +/// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +/// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +/// in the Amazon S3 API Reference. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketInventory { /// Specifies the inventory configuration for an Amazon S3 bucket. #[serde(default, skip_serializing_if = "Option::is_none")] pub destination: Option, - /// Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria. + /// Specifies an inventory filter. The inventory only includes objects that meet + /// the filter's criteria. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -339,19 +404,22 @@ pub struct BucketInventory { /// Specifies the inventory configuration for an Amazon S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketInventoryDestination { - /// Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published. + /// Contains the bucket name, file format, bucket owner (optional), and prefix + /// (optional) where inventory results are published. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3BucketDestination")] pub s3_bucket_destination: Option, } -/// Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published. +/// Contains the bucket name, file format, bucket owner (optional), and prefix +/// (optional) where inventory results are published. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketInventoryDestinationS3BucketDestination { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accountID")] pub account_id: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub bucket: Option, - /// Contains the type of server-side encryption used to encrypt the inventory results. + /// Contains the type of server-side encryption used to encrypt the inventory + /// results. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -360,7 +428,8 @@ pub struct BucketInventoryDestinationS3BucketDestination { pub prefix: Option, } -/// Contains the type of server-side encryption used to encrypt the inventory results. +/// Contains the type of server-side encryption used to encrypt the inventory +/// results. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketInventoryDestinationS3BucketDestinationEncryption { /// Specifies the use of SSE-KMS to encrypt delivered inventory reports. @@ -375,7 +444,8 @@ pub struct BucketInventoryDestinationS3BucketDestinationEncryptionSseKms { pub key_id: Option, } -/// Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria. +/// Specifies an inventory filter. The inventory only includes objects that meet +/// the filter's criteria. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketInventoryFilter { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -399,18 +469,27 @@ pub struct BucketLifecycle { /// A lifecycle rule for individual objects in an Amazon S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRules { - /// Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) in the Amazon S3 User Guide. + /// Specifies the days since the initiation of an incomplete multipart upload + /// that Amazon S3 will wait before permanently removing all parts of the upload. + /// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + /// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "abortIncompleteMultipartUpload")] pub abort_incomplete_multipart_upload: Option, /// Container for the expiration for the lifecycle of the object. #[serde(default, skip_serializing_if = "Option::is_none")] pub expiration: Option, - /// The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified. + /// The Filter is used to identify objects that a Lifecycle Rule applies to. + /// A Filter must have exactly one of Prefix, Tag, or And specified. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, - /// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime. + /// Specifies when noncurrent object versions expire. Upon expiration, Amazon + /// S3 permanently deletes the noncurrent object versions. 
You set this lifecycle + /// configuration action on a bucket that has versioning enabled (or suspended) + /// to request that Amazon S3 delete noncurrent object versions at a specific + /// period in the object's lifetime. #[serde(default, skip_serializing_if = "Option::is_none", rename = "noncurrentVersionExpiration")] pub noncurrent_version_expiration: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "noncurrentVersionTransitions")] @@ -423,7 +502,11 @@ pub struct BucketLifecycleRules { pub transitions: Option>, } -/// Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) in the Amazon S3 User Guide. +/// Specifies the days since the initiation of an incomplete multipart upload +/// that Amazon S3 will wait before permanently removing all parts of the upload. +/// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +/// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesAbortIncompleteMultipartUpload { #[serde(default, skip_serializing_if = "Option::is_none", rename = "daysAfterInitiation")] @@ -441,10 +524,13 @@ pub struct BucketLifecycleRulesExpiration { pub expired_object_delete_marker: Option, } -/// The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified. +/// The Filter is used to identify objects that a Lifecycle Rule applies to. +/// A Filter must have exactly one of Prefix, Tag, or And specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesFilter { - /// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator. + /// This is used in a Lifecycle Rule Filter to apply a logical AND to two or + /// more predicates. The Lifecycle Rule will apply to any object matching all + /// of the predicates configured inside the And operator. #[serde(default, skip_serializing_if = "Option::is_none")] pub and: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectSizeGreaterThan")] @@ -458,7 +544,9 @@ pub struct BucketLifecycleRulesFilter { pub tag: Option, } -/// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator. +/// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +/// more predicates. The Lifecycle Rule will apply to any object matching all +/// of the predicates configured inside the And operator. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesFilterAnd { #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectSizeGreaterThan")] @@ -489,7 +577,11 @@ pub struct BucketLifecycleRulesFilterTag { pub value: Option, } -/// Specifies when noncurrent object versions expire. 
Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime. +/// Specifies when noncurrent object versions expire. Upon expiration, Amazon +/// S3 permanently deletes the noncurrent object versions. You set this lifecycle +/// configuration action on a bucket that has versioning enabled (or suspended) +/// to request that Amazon S3 delete noncurrent object versions at a specific +/// period in the object's lifetime. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesNoncurrentVersionExpiration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "newerNoncurrentVersions")] @@ -498,7 +590,13 @@ pub struct BucketLifecycleRulesNoncurrentVersionExpiration { pub noncurrent_days: Option, } -/// Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime. +/// Container for the transition rule that describes when noncurrent objects +/// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +/// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled +/// (or versioning is suspended), you can set this action to request that Amazon +/// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +/// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at +/// a specific period in the object's lifetime. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesNoncurrentVersionTransitions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "newerNoncurrentVersions")] @@ -509,7 +607,10 @@ pub struct BucketLifecycleRulesNoncurrentVersionTransitions { pub storage_class: Option, } -/// Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) in the Amazon S3 User Guide. +/// Specifies when an object transitions to a specified storage class. For more +/// information about Amazon S3 lifecycle configuration rules, see Transitioning +/// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLifecycleRulesTransitions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -523,12 +624,18 @@ pub struct BucketLifecycleRulesTransitions { /// Container for logging status information. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLogging { - /// Describes where logs are stored and the prefix that Amazon S3 assigns to all log object keys for a bucket. 
For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in the Amazon S3 API Reference. + /// Describes where logs are stored and the prefix that Amazon S3 assigns to + /// all log object keys for a bucket. For more information, see PUT Bucket logging + /// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + /// in the Amazon S3 API Reference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "loggingEnabled")] pub logging_enabled: Option, } -/// Describes where logs are stored and the prefix that Amazon S3 assigns to all log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in the Amazon S3 API Reference. +/// Describes where logs are stored and the prefix that Amazon S3 assigns to +/// all log object keys for a bucket. For more information, see PUT Bucket logging +/// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +/// in the Amazon S3 API Reference. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLoggingLoggingEnabled { #[serde(default, skip_serializing_if = "Option::is_none", rename = "targetBucket")] @@ -539,8 +646,13 @@ pub struct BucketLoggingLoggingEnabled { pub target_prefix: Option, } -/// Container for granting information. -/// Buckets that use the bucket owner enforced setting for Object Ownership don't support target grants. For more information, see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) in the Amazon S3 User Guide. +/// Container for granting information. +/// +/// +/// Buckets that use the bucket owner enforced setting for Object Ownership don't +/// support target grants. For more information, see Permissions server access +/// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketLoggingLoggingEnabledTargetGrants { /// Container for the person being granted permissions. @@ -565,22 +677,35 @@ pub struct BucketLoggingLoggingEnabledTargetGrantsGrantee { pub u_ri: Option, } -/// Specifies a metrics configuration for the CloudWatch request metrics (specified by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +/// Specifies a metrics configuration for the CloudWatch request metrics (specified +/// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +/// an existing metrics configuration, note that this is a full replacement of +/// the existing metrics configuration. If you don't include the elements you +/// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration +/// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). 
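For orientation, a minimal sketch of a metrics configuration built from the types above, scoping CloudWatch request metrics to one key prefix. The prefix field on the filter is an assumption (it is not visible in this hunk); id and filter are taken from the struct as shown.

// Hedged sketch, not part of the generated patch.
let metrics = BucketMetrics {
    id: Some("docs-requests".to_string()),
    filter: Some(BucketMetricsFilter {
        prefix: Some("docs/".to_string()), // assumed field name; not visible in this hunk
        ..Default::default()
    }),
    ..Default::default()
};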
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketMetrics { - /// Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). + /// Specifies a metrics configuration filter. The metrics configuration only + /// includes objects that meet the filter's criteria. A filter must be a prefix, + /// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). + /// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, } -/// Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +/// Specifies a metrics configuration filter. The metrics configuration only +/// includes objects that meet the filter's criteria. A filter must be a prefix, +/// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). +/// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketMetricsFilter { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessPointARN")] pub access_point_arn: Option, - /// A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply. + /// A conjunction (logical AND) of predicates, which is used in evaluating a + /// metrics filter. The operator must have at least two predicates, and an object + /// must match all of the predicates in order for the filter to apply. #[serde(default, skip_serializing_if = "Option::is_none")] pub and: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -590,7 +715,9 @@ pub struct BucketMetricsFilter { pub tag: Option, } -/// A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply. +/// A conjunction (logical AND) of predicates, which is used in evaluating a +/// metrics filter. The operator must have at least two predicates, and an object +/// must match all of the predicates in order for the filter to apply. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketMetricsFilterAnd { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessPointARN")] @@ -619,7 +746,8 @@ pub struct BucketMetricsFilterTag { pub value: Option, } -/// A container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off for the bucket. 
+/// A container for specifying the notification configuration of the bucket. +/// If this element is empty, notifications are turned off for the bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "lambdaFunctionConfigurations")] @@ -635,17 +763,22 @@ pub struct BucketNotification { pub struct BucketNotificationLambdaFunctionConfigurations { #[serde(default, skip_serializing_if = "Option::is_none")] pub events: Option>, - /// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. + /// Specifies object key name filtering rules. For information about key name + /// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, - /// An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID. + /// An optional unique identifier for configurations in a notification configuration. + /// If you don't provide one, Amazon S3 will assign an ID. #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "lambdaFunctionARN")] pub lambda_function_arn: Option, } -/// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. +/// Specifies object key name filtering rules. For information about key name +/// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationLambdaFunctionConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. @@ -656,12 +789,14 @@ pub struct BucketNotificationLambdaFunctionConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationLambdaFunctionConfigurationsFilterKey { - /// A list of containers for the key-value pair that defines the criteria for the filter rule. + /// A list of containers for the key-value pair that defines the criteria for + /// the filter rule. #[serde(default, skip_serializing_if = "Option::is_none", rename = "filterRules")] pub filter_rules: Option>, } -/// Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name. +/// Specifies the Amazon S3 object key name to filter on and whether to filter +/// on the suffix or prefix of the key name. 
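To make the notification wiring above concrete, here is a hedged sketch of a single Lambda notification that fires for objects created under one prefix. The lambda_function_configurations, key, and name field names and the ARN are illustrative assumptions; events, filter, filter_rules, and lambda_function_arn match the structs shown.

// Hedged sketch, not part of the generated patch; names marked below are assumptions.
let notification = BucketNotification {
    lambda_function_configurations: Some(vec![ // assumed field name behind the rename attribute
        BucketNotificationLambdaFunctionConfigurations {
            events: Some(vec!["s3:ObjectCreated:*".to_string()]),
            lambda_function_arn: Some(
                "arn:aws:lambda:eu-central-1:111122223333:function:on-upload".to_string(), // placeholder ARN
            ),
            filter: Some(BucketNotificationLambdaFunctionConfigurationsFilter {
                key: Some(BucketNotificationLambdaFunctionConfigurationsFilterKey { // `key` is an assumed field name
                    filter_rules: Some(vec![
                        BucketNotificationLambdaFunctionConfigurationsFilterKeyFilterRules {
                            name: Some("prefix".to_string()), // assumed field name
                            value: Some("uploads/".to_string()),
                            ..Default::default()
                        },
                    ]),
                    ..Default::default()
                }),
                ..Default::default()
            }),
            ..Default::default()
        },
    ]),
    ..Default::default()
};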
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationLambdaFunctionConfigurationsFilterKeyFilterRules { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -670,22 +805,28 @@ pub struct BucketNotificationLambdaFunctionConfigurationsFilterKeyFilterRules { pub value: Option, } -/// Specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events. +/// Specifies the configuration for publishing messages to an Amazon Simple Queue +/// Service (Amazon SQS) queue when Amazon S3 detects specified events. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationQueueConfigurations { #[serde(default, skip_serializing_if = "Option::is_none")] pub events: Option>, - /// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. + /// Specifies object key name filtering rules. For information about key name + /// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, - /// An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID. + /// An optional unique identifier for configurations in a notification configuration. + /// If you don't provide one, Amazon S3 will assign an ID. #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "queueARN")] pub queue_arn: Option, } -/// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. +/// Specifies object key name filtering rules. For information about key name +/// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationQueueConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. @@ -696,12 +837,14 @@ pub struct BucketNotificationQueueConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationQueueConfigurationsFilterKey { - /// A list of containers for the key-value pair that defines the criteria for the filter rule. + /// A list of containers for the key-value pair that defines the criteria for + /// the filter rule. #[serde(default, skip_serializing_if = "Option::is_none", rename = "filterRules")] pub filter_rules: Option>, } -/// Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name. +/// Specifies the Amazon S3 object key name to filter on and whether to filter +/// on the suffix or prefix of the key name. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationQueueConfigurationsFilterKeyFilterRules { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -710,22 +853,29 @@ pub struct BucketNotificationQueueConfigurationsFilterKeyFilterRules { pub value: Option, } -/// A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events. +/// A container for specifying the configuration for publication of messages +/// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +/// detects specified events. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationTopicConfigurations { #[serde(default, skip_serializing_if = "Option::is_none")] pub events: Option>, - /// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. + /// Specifies object key name filtering rules. For information about key name + /// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, - /// An optional unique identifier for configurations in a notification configuration. If you don't provide one, Amazon S3 will assign an ID. + /// An optional unique identifier for configurations in a notification configuration. + /// If you don't provide one, Amazon S3 will assign an ID. #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "topicARN")] pub topic_arn: Option, } -/// Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the Amazon S3 User Guide. +/// Specifies object key name filtering rules. For information about key name +/// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationTopicConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. @@ -736,12 +886,14 @@ pub struct BucketNotificationTopicConfigurationsFilter { /// A container for object key name prefix and suffix filtering rules. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationTopicConfigurationsFilterKey { - /// A list of containers for the key-value pair that defines the criteria for the filter rule. + /// A list of containers for the key-value pair that defines the criteria for + /// the filter rule. #[serde(default, skip_serializing_if = "Option::is_none", rename = "filterRules")] pub filter_rules: Option>, } -/// Specifies the Amazon S3 object key name to filter on and whether to filter on the suffix or prefix of the key name. +/// Specifies the Amazon S3 object key name to filter on and whether to filter +/// on the suffix or prefix of the key name. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketNotificationTopicConfigurationsFilterKeyFilterRules { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -750,7 +902,8 @@ pub struct BucketNotificationTopicConfigurationsFilterKeyFilterRules { pub value: Option, } -/// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket. +/// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) +/// that you want to apply to this Amazon S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketOwnershipControls { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -760,15 +913,32 @@ pub struct BucketOwnershipControls { /// The container element for an ownership control rule. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketOwnershipControlsRules { - /// The container element for object ownership for a bucket's ownership controls. - /// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL. - /// ObjectWriter - The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL. - /// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect permissions. The bucket owner automatically owns and has full control over every object in the bucket. The bucket only accepts PUT requests that don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. + /// The container element for object ownership for a bucket's ownership controls. + /// + /// + /// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + /// the bucket owner if the objects are uploaded with the bucket-owner-full-control + /// canned ACL. + /// + /// + /// ObjectWriter - The uploading account will own the object if the object is + /// uploaded with the bucket-owner-full-control canned ACL. + /// + /// + /// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + /// affect permissions. The bucket owner automatically owns and has full control + /// over every object in the bucket. The bucket only accepts PUT requests that + /// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control + /// canned ACL or an equivalent form of this ACL expressed in the XML format. #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectOwnership")] pub object_ownership: Option, } -/// The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) in the Amazon S3 User Guide. +/// The PublicAccessBlock configuration that you want to apply to this Amazon +/// S3 bucket. You can enable the configuration options in any combination. 
For +/// more information about when Amazon S3 considers a bucket or object public, +/// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketPublicAccessBlock { #[serde(default, skip_serializing_if = "Option::is_none", rename = "blockPublicACLs")] @@ -781,7 +951,8 @@ pub struct BucketPublicAccessBlock { pub restrict_public_buckets: Option, } -/// A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB. +/// A container for replication rules. You can add up to 1,000 rules. The maximum +/// size of a replication configuration is 2 MB. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplication { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -793,18 +964,34 @@ pub struct BucketReplication { /// Specifies which Amazon S3 objects to replicate and where to store the replicas. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRules { - /// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). - /// For more information about delete marker replication, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). - /// If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + /// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + /// in your replication configuration, you must also include a DeleteMarkerReplication + /// element. If your Filter includes a Tag element, the DeleteMarkerReplication + /// Status must be set to Disabled, because Amazon S3 does not support replicating + /// delete markers for tag-based rules. For an example configuration, see Basic + /// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + /// + /// + /// For more information about delete marker replication, see Basic Rule Configuration + /// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + /// + /// + /// If you are using an earlier version of the replication configuration, Amazon + /// S3 handles replication of delete markers differently. For more information, + /// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "deleteMarkerReplication")] pub delete_marker_replication: Option, - /// Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). + /// Specifies information about where to publish analysis or configuration results + /// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). #[serde(default, skip_serializing_if = "Option::is_none")] pub destination: Option, - /// Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) in the Amazon S3 User Guide. + /// Optional configuration to replicate existing source bucket objects. For more + /// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + /// in the Amazon S3 User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "existingObjectReplication")] pub existing_object_replication: Option, - /// A filter that identifies the subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + /// A filter that identifies the subset of objects to which the replication rule + /// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. #[serde(default, skip_serializing_if = "Option::is_none")] pub filter: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -813,23 +1000,41 @@ pub struct BucketReplicationRules { pub prefix: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub priority: Option, - /// A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer managed key stored in Amazon Web Services Key Management Service (SSE-KMS). + /// A container that describes additional filters for identifying the source + /// objects that you want to replicate. You can choose to enable or disable the + /// replication of these objects. Currently, Amazon S3 supports only the filter + /// that you can specify for objects created with server-side encryption using + /// a customer managed key stored in Amazon Web Services Key Management Service + /// (SSE-KMS). #[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceSelectionCriteria")] pub source_selection_criteria: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, } -/// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter in your replication configuration, you must also include a DeleteMarkerReplication element. If your Filter includes a Tag element, the DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does not support replicating delete markers for tag-based rules. For an example configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). 
-/// For more information about delete marker replication, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). -/// If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +/// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter +/// in your replication configuration, you must also include a DeleteMarkerReplication +/// element. If your Filter includes a Tag element, the DeleteMarkerReplication +/// Status must be set to Disabled, because Amazon S3 does not support replicating +/// delete markers for tag-based rules. For an example configuration, see Basic +/// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +/// +/// +/// For more information about delete marker replication, see Basic Rule Configuration +/// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). +/// +/// +/// If you are using an earlier version of the replication configuration, Amazon +/// S3 handles replication of delete markers differently. For more information, +/// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDeleteMarkerReplication { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, } -/// Specifies information about where to publish analysis or configuration results for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). +/// Specifies information about where to publish analysis or configuration results +/// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestination { /// A container for information about access control for replicas. @@ -839,13 +1044,18 @@ pub struct BucketReplicationRulesDestination { pub account: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub bucket: Option, - /// Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects. + /// Specifies encryption-related information for an Amazon S3 bucket that is + /// a destination for replicated objects. #[serde(default, skip_serializing_if = "Option::is_none", rename = "encryptionConfiguration")] pub encryption_configuration: Option, - /// A container specifying replication metrics-related settings enabling replication metrics and events. + /// A container specifying replication metrics-related settings enabling replication + /// metrics and events. #[serde(default, skip_serializing_if = "Option::is_none")] pub metrics: Option, - /// A container specifying S3 Replication Time Control (S3 RTC) related information, including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. Must be specified together with a Metrics block. + /// A container specifying S3 Replication Time Control (S3 RTC) related information, + /// including whether S3 RTC is enabled and the time when all objects and operations + /// on objects must be replicated. 
Must be specified together with a Metrics + /// block. #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicationTime")] pub replication_time: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "storageClass")] @@ -859,61 +1069,83 @@ pub struct BucketReplicationRulesDestinationAccessControlTranslation { pub owner: Option, } -/// Specifies encryption-related information for an Amazon S3 bucket that is a destination for replicated objects. +/// Specifies encryption-related information for an Amazon S3 bucket that is +/// a destination for replicated objects. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestinationEncryptionConfiguration { #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicaKMSKeyID")] pub replica_kms_key_id: Option, } -/// A container specifying replication metrics-related settings enabling replication metrics and events. +/// A container specifying replication metrics-related settings enabling replication +/// metrics and events. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestinationMetrics { - /// A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold. + /// A container specifying the time value for S3 Replication Time Control (S3 + /// RTC) and replication metrics EventThreshold. #[serde(default, skip_serializing_if = "Option::is_none", rename = "eventThreshold")] pub event_threshold: Option, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, } -/// A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold. +/// A container specifying the time value for S3 Replication Time Control (S3 +/// RTC) and replication metrics EventThreshold. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestinationMetricsEventThreshold { #[serde(default, skip_serializing_if = "Option::is_none")] pub minutes: Option, } -/// A container specifying S3 Replication Time Control (S3 RTC) related information, including whether S3 RTC is enabled and the time when all objects and operations on objects must be replicated. Must be specified together with a Metrics block. +/// A container specifying S3 Replication Time Control (S3 RTC) related information, +/// including whether S3 RTC is enabled and the time when all objects and operations +/// on objects must be replicated. Must be specified together with a Metrics +/// block. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestinationReplicationTime { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, - /// A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold. + /// A container specifying the time value for S3 Replication Time Control (S3 + /// RTC) and replication metrics EventThreshold. #[serde(default, skip_serializing_if = "Option::is_none")] pub time: Option, } -/// A container specifying the time value for S3 Replication Time Control (S3 RTC) and replication metrics EventThreshold. +/// A container specifying the time value for S3 Replication Time Control (S3 +/// RTC) and replication metrics EventThreshold. 
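The doc comments above note that ReplicationTime must be specified together with a Metrics block; the sketch below shows that pairing using only field names visible in this diff (concrete types are assumptions).

// Hedged sketch, not part of the generated patch: enable S3 RTC with the
// matching replication metrics event threshold.
let destination = BucketReplicationRulesDestination {
    bucket: Some("arn:aws:s3:::replica-bucket".to_string()), // placeholder bucket ARN
    replication_time: Some(BucketReplicationRulesDestinationReplicationTime {
        status: Some("Enabled".to_string()),
        time: Some(BucketReplicationRulesDestinationReplicationTimeTime {
            minutes: Some(15),
            ..Default::default()
        }),
        ..Default::default()
    }),
    metrics: Some(BucketReplicationRulesDestinationMetrics {
        status: Some("Enabled".to_string()),
        event_threshold: Some(BucketReplicationRulesDestinationMetricsEventThreshold {
            minutes: Some(15),
            ..Default::default()
        }),
        ..Default::default()
    }),
    ..Default::default()
};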
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesDestinationReplicationTimeTime { #[serde(default, skip_serializing_if = "Option::is_none")] pub minutes: Option, } -/// Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) in the Amazon S3 User Guide. +/// Optional configuration to replicate existing source bucket objects. For more +/// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesExistingObjectReplication { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, } -/// A filter that identifies the subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +/// A filter that identifies the subset of objects to which the replication rule +/// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesFilter { - /// A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. - /// For example: - /// * If you specify both a Prefix and a Tag filter, wrap these filters in an And tag. - /// * If you specify a filter based on multiple tags, wrap the Tag elements in an And tag. + /// A container for specifying rule filters. The filters determine the subset + /// of objects to which the rule applies. This element is required only if you + /// specify more than one filter. + /// + /// + /// For example: + /// + /// + /// * If you specify both a Prefix and a Tag filter, wrap these filters in + /// an And tag. + /// + /// + /// * If you specify a filter based on multiple tags, wrap the Tag elements + /// in an And tag. #[serde(default, skip_serializing_if = "Option::is_none")] pub and: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -923,10 +1155,20 @@ pub struct BucketReplicationRulesFilter { pub tag: Option, } -/// A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. -/// For example: -/// * If you specify both a Prefix and a Tag filter, wrap these filters in an And tag. -/// * If you specify a filter based on multiple tags, wrap the Tag elements in an And tag. +/// A container for specifying rule filters. The filters determine the subset +/// of objects to which the rule applies. This element is required only if you +/// specify more than one filter. +/// +/// +/// For example: +/// +/// +/// * If you specify both a Prefix and a Tag filter, wrap these filters in +/// an And tag. +/// +/// +/// * If you specify a filter based on multiple tags, wrap the Tag elements +/// in an And tag. 
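Following the rule spelled out above (wrap a Prefix plus Tag combination in an And element), a hedged sketch of a replication rule filter. The prefix field name inside the And block is an assumption, and the tag list is omitted because its element type is not visible in this hunk.

// Hedged sketch, not part of the generated patch.
let filter = BucketReplicationRulesFilter {
    and: Some(BucketReplicationRulesFilterAnd {
        prefix: Some("photos/".to_string()), // assumed field name; tag entries would sit alongside it here
        ..Default::default()
    }),
    ..Default::default()
};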
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesFilterAnd { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -953,27 +1195,50 @@ pub struct BucketReplicationRulesFilterTag { pub value: Option, } -/// A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer managed key stored in Amazon Web Services Key Management Service (SSE-KMS). +/// A container that describes additional filters for identifying the source +/// objects that you want to replicate. You can choose to enable or disable the +/// replication of these objects. Currently, Amazon S3 supports only the filter +/// that you can specify for objects created with server-side encryption using +/// a customer managed key stored in Amazon Web Services Key Management Service +/// (SSE-KMS). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesSourceSelectionCriteria { - /// A filter that you can specify for selection for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. - /// If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed. + /// A filter that you can specify for selection for modifications on replicas. + /// Amazon S3 doesn't replicate replica modifications by default. In the latest + /// version of replication configuration (when Filter is specified), you can + /// specify this element and set the status to Enabled to replicate modifications + /// on replicas. + /// + /// + /// If you don't specify the Filter element, Amazon S3 assumes that the replication + /// configuration is the earlier version, V1. In the earlier version, this element + /// is not allowed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "replicaModifications")] pub replica_modifications: Option, - /// A container for filter information for the selection of S3 objects encrypted with Amazon Web Services KMS. + /// A container for filter information for the selection of S3 objects encrypted + /// with Amazon Web Services KMS. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sseKMSEncryptedObjects")] pub sse_kms_encrypted_objects: Option, } -/// A filter that you can specify for selection for modifications on replicas. Amazon S3 doesn't replicate replica modifications by default. In the latest version of replication configuration (when Filter is specified), you can specify this element and set the status to Enabled to replicate modifications on replicas. -/// If you don't specify the Filter element, Amazon S3 assumes that the replication configuration is the earlier version, V1. In the earlier version, this element is not allowed. +/// A filter that you can specify for selection for modifications on replicas. +/// Amazon S3 doesn't replicate replica modifications by default. 
In the latest +/// version of replication configuration (when Filter is specified), you can +/// specify this element and set the status to Enabled to replicate modifications +/// on replicas. +/// +/// +/// If you don't specify the Filter element, Amazon S3 assumes that the replication +/// configuration is the earlier version, V1. In the earlier version, this element +/// is not allowed. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesSourceSelectionCriteriaReplicaModifications { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option, } -/// A container for filter information for the selection of S3 objects encrypted with Amazon Web Services KMS. +/// A container for filter information for the selection of S3 objects encrypted +/// with Amazon Web Services KMS. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketReplicationRulesSourceSelectionCriteriaSseKmsEncryptedObjects { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -1019,7 +1284,8 @@ pub struct BucketWebsite { /// Container for the Suffix element. #[serde(default, skip_serializing_if = "Option::is_none", rename = "indexDocument")] pub index_document: Option, - /// Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket. + /// Specifies the redirect behavior of all requests to a website endpoint of + /// an Amazon S3 bucket. #[serde(default, skip_serializing_if = "Option::is_none", rename = "redirectAllRequestsTo")] pub redirect_all_requests_to: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "routingRules")] @@ -1040,7 +1306,8 @@ pub struct BucketWebsiteIndexDocument { pub suffix: Option, } -/// Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket. +/// Specifies the redirect behavior of all requests to a website endpoint of +/// an Amazon S3 bucket. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketWebsiteRedirectAllRequestsTo { #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostName")] @@ -1049,18 +1316,28 @@ pub struct BucketWebsiteRedirectAllRequestsTo { pub protocol: Option, } -/// Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) in the Amazon S3 User Guide. +/// Specifies the redirect behavior and when a redirect is applied. For more +/// information about routing rules, see Configuring advanced conditional redirects +/// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +/// in the Amazon S3 User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketWebsiteRoutingRules { - /// A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error. + /// A container for describing a condition that must be met for the specified + /// redirect to apply. For example, 1. If request is for pages in the /docs folder, + /// redirect to the /documents folder. 2. 
If request results in HTTP error 4xx, + /// redirect request to another host where you might process the error. #[serde(default, skip_serializing_if = "Option::is_none")] pub condition: Option, - /// Specifies how requests are redirected. In the event of an error, you can specify a different error code to return. + /// Specifies how requests are redirected. In the event of an error, you can + /// specify a different error code to return. #[serde(default, skip_serializing_if = "Option::is_none")] pub redirect: Option, } -/// A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error. +/// A container for describing a condition that must be met for the specified +/// redirect to apply. For example, 1. If request is for pages in the /docs folder, +/// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +/// redirect request to another host where you might process the error. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketWebsiteRoutingRulesCondition { #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpErrorCodeReturnedEquals")] @@ -1069,7 +1346,8 @@ pub struct BucketWebsiteRoutingRulesCondition { pub key_prefix_equals: Option, } -/// Specifies how requests are redirected. In the event of an error, you can specify a different error code to return. +/// Specifies how requests are redirected. In the event of an error, you can +/// specify a different error code to return. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketWebsiteRoutingRulesRedirect { #[serde(default, skip_serializing_if = "Option::is_none", rename = "hostName")] @@ -1087,10 +1365,15 @@ pub struct BucketWebsiteRoutingRulesRedirect { /// BucketStatus defines the observed state of Bucket #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A forward slash followed by the name of the bucket. 
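The routing-rule structs reflowed above map onto S3's website RoutingRule shape. As a hedged illustration (not part of this patch), a rule that redirects requests under a key prefix might be built like this; the `use` path, the `Option<String>` field types, and the `host_name` field name are assumptions inferred from the serde renames shown above, while the struct names and the `condition`, `redirect`, and `key_prefix_equals` fields are taken verbatim from the generated code:

// Hedged sketch: redirect requests under the "docs/" prefix to another host.
use kube_custom_resources_rs::s3_services_k8s_aws::v1alpha1::buckets::{
    BucketWebsiteRoutingRules, BucketWebsiteRoutingRulesCondition,
    BucketWebsiteRoutingRulesRedirect,
};

fn docs_redirect_rule() -> BucketWebsiteRoutingRules {
    BucketWebsiteRoutingRules {
        condition: Some(BucketWebsiteRoutingRulesCondition {
            // Only requests whose keys start with this prefix trigger the redirect.
            key_prefix_equals: Some("docs/".to_string()),
            ..Default::default()
        }),
        redirect: Some(BucketWebsiteRoutingRulesRedirect {
            // Field name assumed from the "hostName" serde rename shown above.
            host_name: Some("docs.example.com".to_string()),
            ..Default::default()
        }),
        ..Default::default()
    }
}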
@@ -1098,20 +1381,32 @@ pub struct BucketStatus { pub location: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct BucketStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/apps.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/apps.rs index 8a7d42eee..010bca92f 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/apps.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/apps.rs @@ -21,11 +21,19 @@ pub struct AppSpec { /// The domain ID. #[serde(rename = "domainID")] pub domain_id: String, - /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. - /// The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. 
If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. + /// The instance type and the Amazon Resource Name (ARN) of the SageMaker image + /// created on the instance. + /// + /// + /// The value of InstanceType passed as part of the ResourceSpec in the CreateApp + /// call overrides the value passed as part of the ResourceSpec configured for + /// the user profile or the domain. If InstanceType is not specified in any of + /// those three ResourceSpec values for a KernelGateway app, the CreateApp call + /// fails with a request validation error. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceSpec")] pub resource_spec: Option, - /// Each tag consists of a key and an optional value. Tag keys must be unique per resource. + /// Each tag consists of a key and an optional value. Tag keys must be unique + /// per resource. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// The user profile name. If this value is not set, then SpaceName must be set. @@ -33,8 +41,15 @@ pub struct AppSpec { pub user_profile_name: Option, } -/// The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. -/// The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error. +/// The instance type and the Amazon Resource Name (ARN) of the SageMaker image +/// created on the instance. +/// +/// +/// The value of InstanceType passed as part of the ResourceSpec in the CreateApp +/// call overrides the value passed as part of the ResourceSpec configured for +/// the user profile or the domain. If InstanceType is not specified in any of +/// those three ResourceSpec values for a KernelGateway app, the CreateApp call +/// fails with a request validation error. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AppResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -47,9 +62,21 @@ pub struct AppResourceSpec { pub sage_maker_image_version_arn: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. 
+/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AppTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -61,10 +88,15 @@ pub struct AppTags { /// AppStatus defines the observed state of App #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AppStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The status. @@ -72,20 +104,32 @@ pub struct AppStatus { pub status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AppStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct AppStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/dataqualityjobdefinitions.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/dataqualityjobdefinitions.rs index 54048f551..e8c857054 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/dataqualityjobdefinitions.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/dataqualityjobdefinitions.rs @@ -19,7 +19,8 @@ pub struct DataQualityJobDefinitionSpec { /// Configures the constraints and baselines for the monitoring job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataQualityBaselineConfig")] pub data_quality_baseline_config: Option, - /// A list of inputs for the monitoring job. Currently endpoints are supported as monitoring inputs. + /// A list of inputs for the monitoring job. Currently endpoints are supported + /// as monitoring inputs. #[serde(rename = "dataQualityJobInput")] pub data_quality_job_input: DataQualityJobDefinitionDataQualityJobInput, /// The output configuration for monitoring jobs. @@ -34,13 +35,16 @@ pub struct DataQualityJobDefinitionSpec { /// Specifies networking configuration for the monitoring job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + /// to perform tasks on your behalf. #[serde(rename = "roleARN")] pub role_arn: String, /// A time limit for how long the monitoring job is allowed to run before stopping. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// (Optional) An array of key-value pairs. 
For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -89,7 +93,8 @@ pub struct DataQualityJobDefinitionDataQualityBaselineConfigStatisticsResource { pub s3_uri: Option, } -/// A list of inputs for the monitoring job. Currently endpoints are supported as monitoring inputs. +/// A list of inputs for the monitoring job. Currently endpoints are supported +/// as monitoring inputs. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionDataQualityJobInput { /// Input object for the endpoint @@ -134,12 +139,14 @@ pub struct DataQualityJobDefinitionDataQualityJobOutputConfig { /// The output object for a monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionDataQualityJobOutputConfigMonitoringOutputs { - /// Information about where and how you want to store the results of a monitoring job. + /// Information about where and how you want to store the results of a monitoring + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } -/// Information about where and how you want to store the results of a monitoring job. +/// Information about where and how you want to store the results of a monitoring +/// job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionDataQualityJobOutputConfigMonitoringOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -178,12 +185,20 @@ pub struct DataQualityJobDefinitionNetworkConfig { pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -199,9 +214,21 @@ pub struct DataQualityJobDefinitionStoppingCondition { pub max_runtime_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
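// Hedged illustration (not part of this patch): attaching the cost-allocation tags
// described in the doc comment above to a DataQualityJobDefinition. The module path
// and the `key`/`value` field names with Option<String> types follow the usual ACK
// tag shape and are assumptions; only the struct name comes from the generated code.
use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::dataqualityjobdefinitions::DataQualityJobDefinitionTags;

fn cost_allocation_tags() -> Vec<DataQualityJobDefinitionTags> {
    vec![
        DataQualityJobDefinitionTags {
            key: Some("team".to_string()),
            value: Some("ml-platform".to_string()),
            ..Default::default()
        },
        DataQualityJobDefinitionTags {
            key: Some("cost-center".to_string()),
            value: Some("1234".to_string()),
            ..Default::default()
        },
    ]
}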
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -213,28 +240,45 @@ pub struct DataQualityJobDefinitionTags { /// DataQualityJobDefinitionStatus defines the observed state of DataQualityJobDefinition #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DataQualityJobDefinitionStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/domains.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/domains.rs index 5c7534b8a..d0e9e53a1 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/domains.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/domains.rs @@ -12,19 +12,32 @@ use serde::{Serialize, Deserialize}; #[kube(status = "DomainStatus")] #[kube(schema = "disabled")] pub struct DomainSpec { - /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. - /// * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access - /// * VpcOnly - All Studio traffic is through the specified VPC and subnets + /// Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. + /// + /// + /// * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon + /// SageMaker, which allows direct internet access + /// + /// + /// * VpcOnly - All Studio traffic is through the specified VPC and subnets #[serde(default, skip_serializing_if = "Option::is_none", rename = "appNetworkAccessType")] pub app_network_access_type: Option, - /// The entity that creates and manages the required security groups for inter-app communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. + /// The entity that creates and manages the required security groups for inter-app + /// communication in VPCOnly mode. Required when CreateDomain.AppNetworkAccessType + /// is VPCOnly and DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn + /// is provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "appSecurityGroupManagement")] pub app_security_group_management: Option, /// The mode of authentication that members use to access the domain. #[serde(rename = "authMode")] pub auth_mode: String, - /// The default settings to use to create a user profile when UserSettings isn't specified in the call to the CreateUserProfile API. - /// SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain. + /// The default settings to use to create a user profile when UserSettings isn't + /// specified in the call to the CreateUserProfile API. + /// + /// + /// SecurityGroups is aggregated when specified in both calls. For all other + /// settings in UserSettings, the values specified in CreateUserProfile take + /// precedence over those specified in CreateDomain. #[serde(rename = "defaultUserSettings")] pub default_user_settings: DomainDefaultUserSettings, /// A name for the domain. @@ -36,14 +49,21 @@ pub struct DomainSpec { /// Use KmsKeyId. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "homeEFSFileSystemKMSKeyID")] pub home_efs_file_system_kms_key_id: Option, - /// SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. + /// SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached + /// to the domain with an Amazon Web Services managed key by default. For more + /// control, specify a customer managed key. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, /// The VPC subnets that Studio uses for communication. #[serde(rename = "subnetIDs")] pub subnet_i_ds: Vec, - /// Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. - /// Tags that you specify for the Domain are also added to all Apps that the Domain launches. + /// Tags to associated with the Domain. Each tag consists of a key and an optional + /// value. Tag keys must be unique per resource. Tags are searchable using the + /// Search API. + /// + /// + /// Tags that you specify for the Domain are also added to all Apps that the + /// Domain launches. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. @@ -51,8 +71,13 @@ pub struct DomainSpec { pub vpc_id: String, } -/// The default settings to use to create a user profile when UserSettings isn't specified in the call to the CreateUserProfile API. -/// SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain. +/// The default settings to use to create a user profile when UserSettings isn't +/// specified in the call to the CreateUserProfile API. +/// +/// +/// SecurityGroups is aggregated when specified in both calls. For all other +/// settings in UserSettings, the values specified in CreateUserProfile take +/// precedence over those specified in CreateDomain. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "executionRole")] @@ -63,12 +88,17 @@ pub struct DomainDefaultUserSettings { /// The KernelGateway app settings. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelGatewayAppSettings")] pub kernel_gateway_app_settings: Option, - /// A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes. + /// A collection of settings that configure user interaction with the RStudioServerPro + /// app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro + /// app must be deleted and a new one created to make any changes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "rStudioServerProAppSettings")] pub r_studio_server_pro_app_settings: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroups")] pub security_groups: Option>, - /// Specifies options for sharing SageMaker Studio notebooks. 
These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed. + /// Specifies options for sharing SageMaker Studio notebooks. These settings + /// are specified as part of DefaultUserSettings when the CreateDomain API is + /// called, and as part of UserSettings when the CreateUserProfile API is called. + /// When SharingSettings is not specified, notebook sharing isn't allowed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sharingSettings")] pub sharing_settings: Option, /// The TensorBoard app settings. @@ -79,14 +109,16 @@ pub struct DomainDefaultUserSettings { /// The JupyterServer app settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsJupyterServerAppSettings { - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleConfigARNs")] pub lifecycle_config_ar_ns: Option>, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsJupyterServerAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -104,14 +136,16 @@ pub struct DomainDefaultUserSettingsJupyterServerAppSettingsDefaultResourceSpec pub struct DomainDefaultUserSettingsKernelGatewayAppSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "customImages")] pub custom_images: Option>, - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleConfigARNs")] pub lifecycle_config_ar_ns: Option>, } -/// A custom SageMaker image. For more information, see Bring your own SageMaker image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). +/// A custom SageMaker image. For more information, see Bring your own SageMaker +/// image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsKernelGatewayAppSettingsCustomImages { #[serde(default, skip_serializing_if = "Option::is_none", rename = "appImageConfigName")] @@ -122,7 +156,8 @@ pub struct DomainDefaultUserSettingsKernelGatewayAppSettingsCustomImages { pub image_version_number: Option, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. 
+/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -135,7 +170,9 @@ pub struct DomainDefaultUserSettingsKernelGatewayAppSettingsDefaultResourceSpec pub sage_maker_image_version_arn: Option, } -/// A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes. +/// A collection of settings that configure user interaction with the RStudioServerPro +/// app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro +/// app must be deleted and a new one created to make any changes. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsRStudioServerProAppSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessStatus")] @@ -144,7 +181,10 @@ pub struct DomainDefaultUserSettingsRStudioServerProAppSettings { pub user_group: Option, } -/// Specifies options for sharing SageMaker Studio notebooks. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed. +/// Specifies options for sharing SageMaker Studio notebooks. These settings +/// are specified as part of DefaultUserSettings when the CreateDomain API is +/// called, and as part of UserSettings when the CreateUserProfile API is called. +/// When SharingSettings is not specified, notebook sharing isn't allowed. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsSharingSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "notebookOutputOption")] @@ -158,12 +198,14 @@ pub struct DomainDefaultUserSettingsSharingSettings { /// The TensorBoard app settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsTensorBoardAppSettings { - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDefaultUserSettingsTensorBoardAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -179,17 +221,20 @@ pub struct DomainDefaultUserSettingsTensorBoardAppSettingsDefaultResourceSpec { /// A collection of Domain settings. 
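// Hedged illustration (not part of this patch): the DefaultUserSettings shape whose
// doc comments are reflowed above. Security groups set here are aggregated with any
// per-profile settings, while other UserSettings fields in CreateUserProfile take
// precedence. The module path and the Option<Vec<String>> field types are assumptions;
// the struct and field names are taken from the generated code in this file.
use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::domains::{
    DomainDefaultUserSettings, DomainDefaultUserSettingsKernelGatewayAppSettings,
};

fn default_user_settings() -> DomainDefaultUserSettings {
    DomainDefaultUserSettings {
        security_groups: Some(vec!["sg-0123456789abcdef0".to_string()]),
        kernel_gateway_app_settings: Some(DomainDefaultUserSettingsKernelGatewayAppSettings {
            // Lifecycle config ARN is a placeholder value for illustration only.
            lifecycle_config_ar_ns: Some(vec![
                "arn:aws:sagemaker:us-east-1:111122223333:studio-lifecycle-config/example"
                    .to_string(),
            ]),
            ..Default::default()
        }),
        ..Default::default()
    }
}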
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDomainSettings { - /// A collection of settings that configure the RStudioServerPro Domain-level app. + /// A collection of settings that configure the RStudioServerPro Domain-level + /// app. #[serde(default, skip_serializing_if = "Option::is_none", rename = "rStudioServerProDomainSettings")] pub r_studio_server_pro_domain_settings: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] pub security_group_i_ds: Option>, } -/// A collection of settings that configure the RStudioServerPro Domain-level app. +/// A collection of settings that configure the RStudioServerPro Domain-level +/// app. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDomainSettingsRStudioServerProDomainSettings { - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "domainExecutionRoleARN")] @@ -200,7 +245,8 @@ pub struct DomainDomainSettingsRStudioServerProDomainSettings { pub r_studio_package_manager_url: Option, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainDomainSettingsRStudioServerProDomainSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -213,9 +259,21 @@ pub struct DomainDomainSettingsRStudioServerProDomainSettingsDefaultResourceSpec pub sage_maker_image_version_arn: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. 
+/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -227,10 +285,15 @@ pub struct DomainTags { /// DomainStatus defines the observed state of Domain #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The domain ID. @@ -244,20 +307,32 @@ pub struct DomainStatus { pub url: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct DomainStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpointconfigs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpointconfigs.rs index 9e952fcc5..0c6f7044e 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpointconfigs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpointconfigs.rs @@ -12,39 +12,81 @@ use serde::{Serialize, Deserialize}; #[kube(status = "EndpointConfigStatus")] #[kube(schema = "disabled")] pub struct EndpointConfigSpec { - /// Specifies configuration for how an endpoint performs asynchronous inference. This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html). + /// Specifies configuration for how an endpoint performs asynchronous inference. + /// This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync + /// (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "asyncInferenceConfig")] pub async_inference_config: Option, /// Configuration to control how SageMaker captures inference data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataCaptureConfig")] pub data_capture_config: Option, - /// The name of the endpoint configuration. You specify this name in a CreateEndpoint request. + /// The name of the endpoint configuration. You specify this name in a CreateEndpoint + /// request. #[serde(rename = "endpointConfigName")] pub endpoint_config_name: String, - /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. - /// The KmsKeyId can be any of the following formats: - /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - /// * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - /// * Alias name: alias/ExampleAlias - /// * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias - /// The KMS key policy must grant permission to the IAM role that you specify in your CreateEndpoint, UpdateEndpoint requests. 
For more information, refer to the Amazon Web Services Key Management Service section Using Key Policies in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) - /// Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails. - /// For a list of instance types that support local instance storage, see Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). - /// For more information about local instance storage encryption, see SSD Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). + /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service + /// key that SageMaker uses to encrypt data on the storage volume attached to + /// the ML compute instance that hosts the endpoint. + /// + /// + /// The KmsKeyId can be any of the following formats: + /// + /// + /// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + /// + /// + /// * Alias name: alias/ExampleAlias + /// + /// + /// * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + /// + /// + /// The KMS key policy must grant permission to the IAM role that you specify + /// in your CreateEndpoint, UpdateEndpoint requests. For more information, refer + /// to the Amazon Web Services Key Management Service section Using Key Policies + /// in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + /// + /// + /// Certain Nitro-based instances include local storage, dependent on the instance + /// type. Local storage volumes are encrypted using a hardware module on the + /// instance. You can't request a KmsKeyId when using an instance type with local + /// storage. If any of the models that you specify in the ProductionVariants + /// parameter use nitro-based instances with local storage, do not specify a + /// value for the KmsKeyId parameter. If you specify a value for KmsKeyId when + /// using any nitro-based instances with local storage, the call to CreateEndpointConfig + /// fails. + /// + /// + /// For a list of instance types that support local instance storage, see Instance + /// Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). + /// + /// + /// For more information about local instance storage encryption, see SSD Instance + /// Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, - /// An array of ProductionVariant objects, one for each model that you want to host at this endpoint. + /// An array of ProductionVariant objects, one for each model that you want to + /// host at this endpoint. #[serde(rename = "productionVariants")] pub production_variants: Vec, - /// An array of key-value pairs. 
You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// Specifies configuration for how an endpoint performs asynchronous inference. This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html). +/// Specifies configuration for how an endpoint performs asynchronous inference. +/// This is a required field in order for your Endpoint to be invoked using InvokeEndpointAsync +/// (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpointAsync.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigAsyncInferenceConfig { - /// Configures the behavior of the client used by SageMaker to interact with the model container during asynchronous inference. + /// Configures the behavior of the client used by SageMaker to interact with + /// the model container during asynchronous inference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "clientConfig")] pub client_config: Option, /// Specifies the configuration for asynchronous inference invocation outputs. @@ -52,7 +94,8 @@ pub struct EndpointConfigAsyncInferenceConfig { pub output_config: Option, } -/// Configures the behavior of the client used by SageMaker to interact with the model container during asynchronous inference. +/// Configures the behavior of the client used by SageMaker to interact with +/// the model container during asynchronous inference. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigAsyncInferenceConfigClientConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxConcurrentInvocationsPerInstance")] @@ -64,14 +107,16 @@ pub struct EndpointConfigAsyncInferenceConfigClientConfig { pub struct EndpointConfigAsyncInferenceConfigOutputConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, - /// Specifies the configuration for notifications of inference results for asynchronous inference. + /// Specifies the configuration for notifications of inference results for asynchronous + /// inference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "notificationConfig")] pub notification_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3OutputPath")] pub s3_output_path: Option, } -/// Specifies the configuration for notifications of inference results for asynchronous inference. +/// Specifies the configuration for notifications of inference results for asynchronous +/// inference. 
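// Hedged illustration (not part of this patch): the asynchronous-inference
// configuration documented above, writing results to S3 and publishing failures
// to an SNS topic. The module path, the Option<String> field types, and the
// `error_topic` field name (inferred from the "errorTopic" serde rename) are
// assumptions; the other struct and field names come from the generated code.
use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::endpointconfigs::{
    EndpointConfigAsyncInferenceConfig, EndpointConfigAsyncInferenceConfigOutputConfig,
    EndpointConfigAsyncInferenceConfigOutputConfigNotificationConfig,
};

fn async_inference_config() -> EndpointConfigAsyncInferenceConfig {
    EndpointConfigAsyncInferenceConfig {
        client_config: None,
        output_config: Some(EndpointConfigAsyncInferenceConfigOutputConfig {
            s3_output_path: Some("s3://example-bucket/async-results/".to_string()),
            kms_key_id: Some("alias/ExampleAlias".to_string()),
            notification_config: Some(
                EndpointConfigAsyncInferenceConfigOutputConfigNotificationConfig {
                    // Field name assumed from the "errorTopic" rename above.
                    error_topic: Some(
                        "arn:aws:sns:us-east-1:111122223333:example-errors".to_string(),
                    ),
                    ..Default::default()
                },
            ),
            ..Default::default()
        }),
        ..Default::default()
    }
}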
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigAsyncInferenceConfigOutputConfigNotificationConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "errorTopic")] @@ -83,7 +128,8 @@ pub struct EndpointConfigAsyncInferenceConfigOutputConfigNotificationConfig { /// Configuration to control how SageMaker captures inference data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigDataCaptureConfig { - /// Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data. + /// Configuration specifying how to treat different headers. If no headers are + /// specified SageMaker will by default base64 encode when capturing the data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "captureContentTypeHeader")] pub capture_content_type_header: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "captureOptions")] @@ -98,7 +144,8 @@ pub struct EndpointConfigDataCaptureConfig { pub kms_key_id: Option, } -/// Configuration specifying how to treat different headers. If no headers are specified SageMaker will by default base64 encode when capturing the data. +/// Configuration specifying how to treat different headers. If no headers are +/// specified SageMaker will by default base64 encode when capturing the data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigDataCaptureConfigCaptureContentTypeHeader { #[serde(default, skip_serializing_if = "Option::is_none", rename = "csvContentTypes")] @@ -114,14 +161,18 @@ pub struct EndpointConfigDataCaptureConfigCaptureOptions { pub capture_mode: Option, } -/// Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants (https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html). +/// Identifies a model that you want to host and the resources chosen to deploy +/// for hosting it. If you are deploying multiple models, tell SageMaker how +/// to distribute traffic among the models by specifying variant weights. For +/// more information on production variants, check Production variants (https://docs.aws.amazon.com/sagemaker/latest/dg/model-ab-testing.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigProductionVariants { #[serde(default, skip_serializing_if = "Option::is_none", rename = "acceleratorType")] pub accelerator_type: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerStartupHealthCheckTimeoutInSeconds")] pub container_startup_health_check_timeout_in_seconds: Option, - /// Specifies configuration for a core dump from the model container when the process crashes. + /// Specifies configuration for a core dump from the model container when the + /// process crashes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "coreDumpConfig")] pub core_dump_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableSSMAccess")] @@ -145,7 +196,8 @@ pub struct EndpointConfigProductionVariants { pub volume_size_in_gb: Option, } -/// Specifies configuration for a core dump from the model container when the process crashes. 
+/// Specifies configuration for a core dump from the model container when the +/// process crashes. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigProductionVariantsCoreDumpConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "destinationS3URI")] @@ -163,9 +215,21 @@ pub struct EndpointConfigProductionVariantsServerlessConfig { pub memory_size_in_mb: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -177,28 +241,45 @@ pub struct EndpointConfigTags { /// EndpointConfigStatus defines the observed state of EndpointConfig #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointConfigStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpoints.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpoints.rs index 328eb2b12..776dcf6e4 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpoints.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/endpoints.rs @@ -5,40 +5,56 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// EndpointSpec defines the desired state of Endpoint. -/// A hosted endpoint for real-time inference. +/// EndpointSpec defines the desired state of Endpoint. +/// +/// +/// A hosted endpoint for real-time inference. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "Endpoint", plural = "endpoints")] #[kube(namespaced)] #[kube(status = "EndpointStatus")] #[kube(schema = "disabled")] pub struct EndpointSpec { - /// The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. + /// The deployment configuration for an endpoint, which contains the desired + /// deployment strategy and rollback configurations. #[serde(default, skip_serializing_if = "Option::is_none", rename = "deploymentConfig")] pub deployment_config: Option, /// The name of an endpoint configuration. For more information, see CreateEndpointConfig. #[serde(rename = "endpointConfigName")] pub endpoint_config_name: String, - /// The name of the endpoint.The name must be unique within an Amazon Web Services Region in your Amazon Web Services account. The name is case-insensitive in CreateEndpoint, but the case is preserved and must be matched in . + /// The name of the endpoint.The name must be unique within an Amazon Web Services + /// Region in your Amazon Web Services account. The name is case-insensitive + /// in CreateEndpoint, but the case is preserved and must be matched in . #[serde(rename = "endpointName")] pub endpoint_name: String, - /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// The deployment configuration for an endpoint, which contains the desired deployment strategy and rollback configurations. +/// The deployment configuration for an endpoint, which contains the desired +/// deployment strategy and rollback configurations. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointDeploymentConfig { - /// Automatic rollback configuration for handling endpoint deployment failures and recovery. + /// Automatic rollback configuration for handling endpoint deployment failures + /// and recovery. #[serde(default, skip_serializing_if = "Option::is_none", rename = "autoRollbackConfiguration")] pub auto_rollback_configuration: Option, - /// Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default. + /// Update policy for a blue/green deployment. If this update policy is specified, + /// SageMaker creates a new fleet during the deployment while maintaining the + /// old fleet. SageMaker flips traffic to the new fleet according to the specified + /// traffic routing configuration. Only one update policy should be used in the + /// deployment configuration. If no update policy is specified, SageMaker uses + /// a blue/green deployment strategy with all at once traffic shifting by default. #[serde(default, skip_serializing_if = "Option::is_none", rename = "blueGreenUpdatePolicy")] pub blue_green_update_policy: Option, } -/// Automatic rollback configuration for handling endpoint deployment failures and recovery. +/// Automatic rollback configuration for handling endpoint deployment failures +/// and recovery. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointDeploymentConfigAutoRollbackConfiguration { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -52,19 +68,26 @@ pub struct EndpointDeploymentConfigAutoRollbackConfigurationAlarms { pub alarm_name: Option, } -/// Update policy for a blue/green deployment. If this update policy is specified, SageMaker creates a new fleet during the deployment while maintaining the old fleet. SageMaker flips traffic to the new fleet according to the specified traffic routing configuration. Only one update policy should be used in the deployment configuration. If no update policy is specified, SageMaker uses a blue/green deployment strategy with all at once traffic shifting by default. +/// Update policy for a blue/green deployment. If this update policy is specified, +/// SageMaker creates a new fleet during the deployment while maintaining the +/// old fleet. SageMaker flips traffic to the new fleet according to the specified +/// traffic routing configuration. Only one update policy should be used in the +/// deployment configuration. If no update policy is specified, SageMaker uses +/// a blue/green deployment strategy with all at once traffic shifting by default. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointDeploymentConfigBlueGreenUpdatePolicy { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maximumExecutionTimeoutInSeconds")] pub maximum_execution_timeout_in_seconds: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "terminationWaitInSeconds")] pub termination_wait_in_seconds: Option, - /// Defines the traffic routing strategy during an endpoint deployment to shift traffic from the old fleet to the new fleet. 
+ /// Defines the traffic routing strategy during an endpoint deployment to shift + /// traffic from the old fleet to the new fleet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trafficRoutingConfiguration")] pub traffic_routing_configuration: Option, } -/// Defines the traffic routing strategy during an endpoint deployment to shift traffic from the old fleet to the new fleet. +/// Defines the traffic routing strategy during an endpoint deployment to shift +/// traffic from the old fleet to the new fleet. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointDeploymentConfigBlueGreenUpdatePolicyTrafficRoutingConfiguration { /// Specifies the endpoint capacity to activate for production. @@ -97,9 +120,21 @@ pub struct EndpointDeploymentConfigBlueGreenUpdatePolicyTrafficRoutingConfigurat pub value: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -111,24 +146,56 @@ pub struct EndpointTags { /// EndpointStatus defines the observed state of Endpoint #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A timestamp that shows when the endpoint was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationTime")] pub creation_time: Option, - /// The status of the endpoint. - /// * OutOfService: Endpoint is not available to take incoming requests. - /// * Creating: CreateEndpoint is executing. - /// * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. - /// * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count. - /// * RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly. - /// * InService: Endpoint is available to process incoming requests. - /// * Deleting: DeleteEndpoint is executing. - /// * Failed: Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint. + /// The status of the endpoint. + /// + /// + /// * OutOfService: Endpoint is not available to take incoming requests. + /// + /// + /// * Creating: CreateEndpoint is executing. + /// + /// + /// * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. + /// + /// + /// * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated + /// or deleted or re-scaled until it has completed. This maintenance operation + /// does not change any customer-specified values such as VPC config, KMS + /// encryption, model, instance type, or instance count. 
+ /// + /// + /// * RollingBack: Endpoint fails to scale up or down or change its variant + /// weight and is in the process of rolling back to its previous configuration. + /// Once the rollback completes, endpoint returns to an InService status. + /// This transitional status only applies to an endpoint that has autoscaling + /// enabled and is undergoing variant weight or capacity changes as part of + /// an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities + /// operation is called explicitly. + /// + /// + /// * InService: Endpoint is available to process incoming requests. + /// + /// + /// * Deleting: DeleteEndpoint is executing. + /// + /// + /// * Failed: Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason + /// for information about the failure. DeleteEndpoint is the only operation + /// that can be performed on a failed endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointStatus")] pub endpoint_status: Option, /// If the status of the endpoint is Failed, the reason why it failed. @@ -137,28 +204,42 @@ pub struct EndpointStatus { /// A timestamp that shows when the endpoint was last modified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastModifiedTime")] pub last_modified_time: Option, - /// Returns the summary of an in-progress deployment. This field is only returned when the endpoint is creating or updating with a new endpoint configuration. + /// Returns the summary of an in-progress deployment. This field is only returned + /// when the endpoint is creating or updating with a new endpoint configuration. #[serde(default, skip_serializing_if = "Option::is_none", rename = "pendingDeploymentSummary")] pub pending_deployment_summary: Option, - /// An array of ProductionVariantSummary objects, one for each model hosted behind this endpoint. + /// An array of ProductionVariantSummary objects, one for each model hosted behind + /// this endpoint. #[serde(default, skip_serializing_if = "Option::is_none", rename = "productionVariants")] pub production_variants: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusConditions { /// Last time the condition transitioned from one status to another. @@ -177,7 +258,8 @@ pub struct EndpointStatusConditions { pub r#type: String, } -/// Returns the summary of an in-progress deployment. This field is only returned when the endpoint is creating or updating with a new endpoint configuration. +/// Returns the summary of an in-progress deployment. This field is only returned +/// when the endpoint is creating or updating with a new endpoint configuration. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusPendingDeploymentSummary { #[serde(default, skip_serializing_if = "Option::is_none", rename = "endpointConfigName")] @@ -188,7 +270,10 @@ pub struct EndpointStatusPendingDeploymentSummary { pub start_time: Option, } -/// The production variant summary for a deployment when an endpoint is creating or updating with the CreateEndpoint or UpdateEndpoint operations. Describes the VariantStatus , weight and capacity for a production variant associated with an endpoint. +/// The production variant summary for a deployment when an endpoint is creating +/// or updating with the CreateEndpoint or UpdateEndpoint operations. Describes +/// the VariantStatus , weight and capacity for a production variant associated +/// with an endpoint. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusPendingDeploymentSummaryProductionVariants { #[serde(default, skip_serializing_if = "Option::is_none", rename = "acceleratorType")] @@ -226,8 +311,16 @@ pub struct EndpointStatusPendingDeploymentSummaryProductionVariantsCurrentServer pub memory_size_in_mb: Option, } -/// Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant. -/// If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. 
For information about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) in the Amazon ECR User Guide. +/// Gets the Amazon EC2 Container Registry path of the docker image of the model +/// that is hosted in this ProductionVariant. +/// +/// +/// If you used the registry/repository[:tag] form to specify the image path +/// of the primary container when you created the model hosted in this ProductionVariant, +/// the path resolves to a path of the form registry/repository[@digest]. A digest +/// is a hash value that identifies a specific version of an image. For information +/// about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) +/// in the Amazon ECR User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusPendingDeploymentSummaryProductionVariantsDeployedImages { #[serde(default, skip_serializing_if = "Option::is_none", rename = "resolutionTime")] @@ -258,7 +351,10 @@ pub struct EndpointStatusPendingDeploymentSummaryProductionVariantsVariantStatus pub status_message: Option, } -/// Describes weight and capacities for a production variant associated with an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities API and the endpoint status is Updating, you get different desired and current values. +/// Describes weight and capacities for a production variant associated with +/// an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities +/// API and the endpoint status is Updating, you get different desired and current +/// values. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct EndpointStatusProductionVariants { #[serde(default, skip_serializing_if = "Option::is_none", rename = "currentInstanceCount")] @@ -292,8 +388,16 @@ pub struct EndpointStatusProductionVariantsCurrentServerlessConfig { pub memory_size_in_mb: Option, } -/// Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant. -/// If you used the registry/repository[:tag] form to specify the image path of the primary container when you created the model hosted in this ProductionVariant, the path resolves to a path of the form registry/repository[@digest]. A digest is a hash value that identifies a specific version of an image. For information about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) in the Amazon ECR User Guide. +/// Gets the Amazon EC2 Container Registry path of the docker image of the model +/// that is hosted in this ProductionVariant. +/// +/// +/// If you used the registry/repository[:tag] form to specify the image path +/// of the primary container when you created the model hosted in this ProductionVariant, +/// the path resolves to a path of the form registry/repository[@digest]. A digest +/// is a hash value that identifies a specific version of an image. For information +/// about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) +/// in the Amazon ECR User Guide. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct EndpointStatusProductionVariantsDeployedImages {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "resolutionTime")]
diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/featuregroups.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/featuregroups.rs
index 17f068ca3..b9b4ece0f 100644
--- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/featuregroups.rs
+++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/featuregroups.rs
@@ -5,8 +5,13 @@ use kube::CustomResource;
use serde::{Serialize, Deserialize};
-/// FeatureGroupSpec defines the desired state of FeatureGroup.
-/// Amazon SageMaker Feature Store stores features in a collection called Feature Group. A Feature Group can be visualized as a table which has rows, with a unique identifier for each row where each column in the table is a feature. In principle, a Feature Group is composed of features and values per features.
+/// FeatureGroupSpec defines the desired state of FeatureGroup.
+///
+///
+/// Amazon SageMaker Feature Store stores features in a collection called Feature
+/// Group. A Feature Group can be visualized as a table which has rows, with
+/// a unique identifier for each row where each column in the table is a feature.
+/// In principle, a Feature Group is composed of features and values per features.
#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
#[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "FeatureGroup", plural = "featuregroups")]
#[kube(namespaced)]
@@ -16,44 +21,105 @@ pub struct FeatureGroupSpec {
/// A free-form description of a FeatureGroup.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option,
- /// The name of the feature that stores the EventTime of a Record in a FeatureGroup.
- /// An EventTime is a point in time when a new event occurs that corresponds to the creation or update of a Record in a FeatureGroup. All Records in the FeatureGroup must have a corresponding EventTime.
- /// An EventTime can be a String or Fractional.
- /// * Fractional: EventTime feature values must be a Unix timestamp in seconds.
- /// * String: EventTime feature values must be an ISO-8601 string in the format. The following formats are supported yyyy-MM-dd'T'HH:mm:ssZ and yyyy-MM-dd'T'HH:mm:ss.SSSZ where yyyy, MM, and dd represent the year, month, and day respectively and HH, mm, ss, and if applicable, SSS represent the hour, minute, second and milliseconds respectively. 'T' and Z are constants.
+ /// The name of the feature that stores the EventTime of a Record in a FeatureGroup.
+ ///
+ ///
+ /// An EventTime is a point in time when a new event occurs that corresponds
+ /// to the creation or update of a Record in a FeatureGroup. All Records in the
+ /// FeatureGroup must have a corresponding EventTime.
+ ///
+ ///
+ /// An EventTime can be a String or Fractional.
+ ///
+ ///
+ /// * Fractional: EventTime feature values must be a Unix timestamp in seconds.
+ ///
+ ///
+ /// * String: EventTime feature values must be an ISO-8601 string in the format.
+ /// The following formats are supported yyyy-MM-dd'T'HH:mm:ssZ and yyyy-MM-dd'T'HH:mm:ss.SSSZ
+ /// where yyyy, MM, and dd represent the year, month, and day respectively
+ /// and HH, mm, ss, and if applicable, SSS represent the hour, minute, second
+ /// and milliseconds respectively. 'T' and Z are constants.
#[serde(rename = "eventTimeFeatureName")] pub event_time_feature_name: String, - /// A list of Feature names and types. Name and Type is compulsory per Feature. - /// Valid feature FeatureTypes are Integral, Fractional and String. - /// FeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time - /// You can create up to 2,500 FeatureDefinitions per FeatureGroup. + /// A list of Feature names and types. Name and Type is compulsory per Feature. + /// + /// + /// Valid feature FeatureTypes are Integral, Fractional and String. + /// + /// + /// FeatureNames cannot be any of the following: is_deleted, write_time, api_invocation_time + /// + /// + /// You can create up to 2,500 FeatureDefinitions per FeatureGroup. #[serde(rename = "featureDefinitions")] pub feature_definitions: Vec, - /// The name of the FeatureGroup. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. The name: - /// * Must start and end with an alphanumeric character. - /// * Can only contain alphanumeric character and hyphens. Spaces are not allowed. + /// The name of the FeatureGroup. The name must be unique within an Amazon Web + /// Services Region in an Amazon Web Services account. The name: + /// + /// + /// * Must start and end with an alphanumeric character. + /// + /// + /// * Can only contain alphanumeric character and hyphens. Spaces are not + /// allowed. #[serde(rename = "featureGroupName")] pub feature_group_name: String, - /// Use this to configure an OfflineFeatureStore. This parameter allows you to specify: - /// * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. - /// * A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog. - /// * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. If KMS encryption key is not specified, by default we encrypt all data at rest using Amazon Web Services KMS key. By defining your bucket-level key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html) for SSE, you can reduce Amazon Web Services KMS requests costs by up to 99 percent. - /// * Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/). - /// To learn more about this parameter, see OfflineStoreConfig. + /// Use this to configure an OfflineFeatureStore. This parameter allows you to + /// specify: + /// + /// + /// * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. + /// + /// + /// * A configuration for an Amazon Web Services Glue or Amazon Web Services + /// Hive data catalog. + /// + /// + /// * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. + /// If KMS encryption key is not specified, by default we encrypt all data + /// at rest using Amazon Web Services KMS key. By defining your bucket-level + /// key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html) + /// for SSE, you can reduce Amazon Web Services KMS requests costs by up to + /// 99 percent. + /// + /// + /// * Format for the offline store table. Supported formats are Glue (Default) + /// and Apache Iceberg (https://iceberg.apache.org/). + /// + /// + /// To learn more about this parameter, see OfflineStoreConfig. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "offlineStoreConfig")] pub offline_store_config: Option, - /// You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False. - /// You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore. + /// You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore + /// flag in OnlineStoreConfig; the default value is False. + /// + /// + /// You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest + /// encryption of the OnlineStore. #[serde(default, skip_serializing_if = "Option::is_none", rename = "onlineStoreConfig")] pub online_store_config: Option, - /// The name of the Feature whose value uniquely identifies a Record defined in the FeatureStore. Only the latest record per identifier value will be stored in the OnlineStore. RecordIdentifierFeatureName must be one of feature definitions' names. - /// You use the RecordIdentifierFeatureName to access data in a FeatureStore. - /// This name: - /// * Must start and end with an alphanumeric character. - /// * Can only contains alphanumeric characters, hyphens, underscores. Spaces are not allowed. + /// The name of the Feature whose value uniquely identifies a Record defined + /// in the FeatureStore. Only the latest record per identifier value will be + /// stored in the OnlineStore. RecordIdentifierFeatureName must be one of feature + /// definitions' names. + /// + /// + /// You use the RecordIdentifierFeatureName to access data in a FeatureStore. + /// + /// + /// This name: + /// + /// + /// * Must start and end with an alphanumeric character. + /// + /// + /// * Can only contains alphanumeric characters, hyphens, underscores. Spaces + /// are not allowed. #[serde(rename = "recordIdentifierFeatureName")] pub record_identifier_feature_name: String, - /// The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided. + /// The Amazon Resource Name (ARN) of the IAM execution role used to persist + /// data into the OfflineStore if an OfflineStoreConfig is provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleARN")] pub role_arn: Option, /// Tags used to identify Features in each FeatureGroup. @@ -61,7 +127,8 @@ pub struct FeatureGroupSpec { pub tags: Option>, } -/// A list of features. You must include FeatureName and FeatureType. Valid feature FeatureTypes are Integral, Fractional and String. +/// A list of features. You must include FeatureName and FeatureType. Valid feature +/// FeatureTypes are Integral, Fractional and String. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct FeatureGroupFeatureDefinitions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "featureName")] @@ -70,12 +137,30 @@ pub struct FeatureGroupFeatureDefinitions { pub feature_type: Option, } -/// Use this to configure an OfflineFeatureStore. This parameter allows you to specify: -/// * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore. -/// * A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog. -/// * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore. If KMS encryption key is not specified, by default we encrypt all data at rest using Amazon Web Services KMS key. 
By defining your bucket-level key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html) for SSE, you can reduce Amazon Web Services KMS requests costs by up to 99 percent.
-/// * Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg (https://iceberg.apache.org/).
-/// To learn more about this parameter, see OfflineStoreConfig.
+/// Use this to configure an OfflineFeatureStore. This parameter allows you to
+/// specify:
+///
+///
+/// * The Amazon Simple Storage Service (Amazon S3) location of an OfflineStore.
+///
+///
+/// * A configuration for an Amazon Web Services Glue or Amazon Web Services
+/// Hive data catalog.
+///
+///
+/// * An KMS encryption key to encrypt the Amazon S3 location used for OfflineStore.
+/// If KMS encryption key is not specified, by default we encrypt all data
+/// at rest using Amazon Web Services KMS key. By defining your bucket-level
+/// key (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-key.html)
+/// for SSE, you can reduce Amazon Web Services KMS requests costs by up to
+/// 99 percent.
+///
+///
+/// * Format for the offline store table. Supported formats are Glue (Default)
+/// and Apache Iceberg (https://iceberg.apache.org/).
+///
+///
+/// To learn more about this parameter, see OfflineStoreConfig.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct FeatureGroupOfflineStoreConfig {
/// The meta data of the Glue table which serves as data catalog for the OfflineStore.
@@ -83,7 +168,8 @@ pub struct FeatureGroupOfflineStoreConfig {
pub data_catalog_config: Option,
#[serde(default, skip_serializing_if = "Option::is_none", rename = "disableGlueTableCreation")]
pub disable_glue_table_creation: Option,
- /// The Amazon Simple Storage (Amazon S3) location and security configuration for OfflineStore.
+ /// The Amazon Simple Storage (Amazon S3) location and security configuration
+ /// for OfflineStore.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "s3StorageConfig")]
pub s3_storage_config: Option,
}
@@ -99,7 +185,8 @@ pub struct FeatureGroupOfflineStoreConfigDataCatalogConfig {
pub table_name: Option,
}
-/// The Amazon Simple Storage (Amazon S3) location and security configuration for OfflineStore.
+/// The Amazon Simple Storage (Amazon S3) location and security configuration
+/// for OfflineStore.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct FeatureGroupOfflineStoreConfigS3StorageConfig {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")]
@@ -110,8 +197,12 @@ pub struct FeatureGroupOfflineStoreConfigS3StorageConfig {
pub s3_uri: Option,
}
-/// You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore flag in OnlineStoreConfig; the default value is False.
-/// You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest encryption of the OnlineStore.
+/// You can turn the OnlineStore on or off by specifying True for the EnableOnlineStore
+/// flag in OnlineStoreConfig; the default value is False.
+///
+///
+/// You can also include an Amazon Web Services KMS key ID (KMSKeyId) for at-rest
+/// encryption of the OnlineStore.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct FeatureGroupOnlineStoreConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableOnlineStore")] @@ -128,9 +219,21 @@ pub struct FeatureGroupOnlineStoreConfigSecurityConfig { pub kms_key_id: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct FeatureGroupTags {
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -142,15 +245,25 @@ pub struct FeatureGroupTags {
/// FeatureGroupStatus defines the observed state of FeatureGroup
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct FeatureGroupStatus {
- /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource
+ /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member
+ /// that is used to contain resource sync state, account ownership,
+ /// constructed ARN for the resource
#[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")]
pub ack_resource_metadata: Option,
- /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource
+ /// All CRS managed by ACK have a common `Status.Conditions` member that
+ /// contains a collection of `ackv1alpha1.Condition` objects that describe
+ /// the various terminal states of the CR and its backend AWS service API
+ /// resource
#[serde(default, skip_serializing_if = "Option::is_none")]
pub conditions: Option>,
- /// The reason that the FeatureGroup failed to be replicated in the OfflineStore. This failure can occur because:
- /// * The FeatureGroup could not be created in the OfflineStore.
- /// * The FeatureGroup could not be deleted from the OfflineStore.
+ /// The reason that the FeatureGroup failed to be replicated in the OfflineStore.
+ /// This failure can occur because:
+ ///
+ ///
+ /// * The FeatureGroup could not be created in the OfflineStore.
+ ///
+ ///
+ /// * The FeatureGroup could not be deleted from the OfflineStore.
#[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")]
pub failure_reason: Option,
/// The status of the feature group.
@@ -158,20 +271,32 @@ pub struct FeatureGroupStatus {
pub feature_group_status: Option,
}
-/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource
+/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member
+/// that is used to contain resource sync state, account ownership,
+/// constructed ARN for the resource
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct FeatureGroupStatusAckResourceMetadata {
- /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270
+ /// ARN is the Amazon Resource Name for the resource.
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct FeatureGroupStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/hyperparametertuningjobs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/hyperparametertuningjobs.rs index 932947062..f1c211e21 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/hyperparametertuningjobs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/hyperparametertuningjobs.rs @@ -13,43 +13,93 @@ use std::collections::BTreeMap; #[kube(status = "HyperParameterTuningJobStatus")] #[kube(schema = "disabled")] pub struct HyperParameterTuningJobSpec { - /// The HyperParameterTuningJobConfig object that describes the tuning job, including the search strategy, the objective metric used to evaluate training jobs, ranges of parameters to search, and resource limits for the tuning job. For more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). + /// The HyperParameterTuningJobConfig object that describes the tuning job, including + /// the search strategy, the objective metric used to evaluate training jobs, + /// ranges of parameters to search, and resource limits for the tuning job. For + /// more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). #[serde(rename = "hyperParameterTuningJobConfig")] pub hyper_parameter_tuning_job_config: HyperParameterTuningJobHyperParameterTuningJobConfig, - /// The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same Amazon Web Services account and Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive. + /// The name of the tuning job. This name is the prefix for the names of all + /// training jobs that this tuning job launches. 
The name must be unique within + /// the same Amazon Web Services account and Amazon Web Services Region. The + /// name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and + /// : + = @ _ % - (hyphen). The name is not case sensitive. #[serde(rename = "hyperParameterTuningJobName")] pub hyper_parameter_tuning_job_name: String, - /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). - /// Tags that you specify for the tuning job are also added to all training jobs that the tuning job launches. + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// + /// + /// Tags that you specify for the tuning job are also added to all training jobs + /// that the tuning job launches. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The HyperParameterTrainingJobDefinition object that describes the training jobs that this tuning job launches, including static hyperparameters, input data configuration, output data configuration, resource configuration, and stopping condition. + /// The HyperParameterTrainingJobDefinition object that describes the training + /// jobs that this tuning job launches, including static hyperparameters, input + /// data configuration, output data configuration, resource configuration, and + /// stopping condition. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingJobDefinition")] pub training_job_definition: Option, - /// A list of the HyperParameterTrainingJobDefinition objects launched for this tuning job. + /// A list of the HyperParameterTrainingJobDefinition objects launched for this + /// tuning job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingJobDefinitions")] pub training_job_definitions: Option>, - /// Specifies the configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job. - /// All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType value for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job. - /// All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job. + /// Specifies the configuration for starting the hyperparameter tuning job using + /// one or more previous tuning jobs as a starting point. The results of previous + /// tuning jobs are used to inform which combinations of hyperparameters to search + /// over in the new tuning job. 
+ /// + /// + /// All training jobs launched by the new hyperparameter tuning job are evaluated + /// by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM + /// as the WarmStartType value for the warm start configuration, the training + /// job that performs the best in the new tuning job is compared to the best + /// training jobs from the parent tuning jobs. From these, the training job that + /// performs the best as measured by the objective metric is returned as the + /// overall best training job. + /// + /// + /// All training jobs launched by parent hyperparameter tuning jobs and the new + /// hyperparameter tuning jobs count against the limit of training jobs for the + /// tuning job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "warmStartConfig")] pub warm_start_config: Option, } -/// The HyperParameterTuningJobConfig object that describes the tuning job, including the search strategy, the objective metric used to evaluate training jobs, ranges of parameters to search, and resource limits for the tuning job. For more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). +/// The HyperParameterTuningJobConfig object that describes the tuning job, including +/// the search strategy, the objective metric used to evaluate training jobs, +/// ranges of parameters to search, and resource limits for the tuning job. For +/// more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobHyperParameterTuningJobConfig { - /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. + /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter + /// tuning uses the value of this metric to evaluate the training jobs it launches, + /// and returns the training job that results in either the highest or lowest + /// value for this metric, depending on the value you specify for the Type parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameterTuningJobObjective")] pub hyper_parameter_tuning_job_objective: Option, - /// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. - /// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. + /// Specifies ranges of integer, continuous, and categorical hyperparameters + /// that a hyperparameter tuning job searches. 
The hyperparameter tuning job + /// launches training jobs with hyperparameter values within these ranges to + /// find the combination of values that result in the training job with the best + /// performance as measured by the objective metric of the hyperparameter tuning + /// job. + /// + /// + /// The maximum number of items specified for Array Members refers to the maximum + /// number of hyperparameters for each range and also the maximum for the hyperparameter + /// tuning job itself. That is, the sum of the number of hyperparameters for + /// all the ranges can't exceed the maximum number specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "parameterRanges")] pub parameter_ranges: Option, - /// Specifies the maximum number of training jobs and parallel training jobs that a hyperparameter tuning job can launch. + /// Specifies the maximum number of training jobs and parallel training jobs + /// that a hyperparameter tuning job can launch. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceLimits")] pub resource_limits: Option, - /// The strategy hyperparameter tuning uses to find the best combination of hyperparameters for your model. + /// The strategy hyperparameter tuning uses to find the best combination of hyperparameters + /// for your model. #[serde(default, skip_serializing_if = "Option::is_none")] pub strategy: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingJobEarlyStoppingType")] @@ -59,7 +109,10 @@ pub struct HyperParameterTuningJobHyperParameterTuningJobConfig { pub tuning_job_completion_criteria: Option, } -/// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. +/// Defines the objective metric for a hyperparameter tuning job. Hyperparameter +/// tuning uses the value of this metric to evaluate the training jobs it launches, +/// and returns the training job that results in either the highest or lowest +/// value for this metric, depending on the value you specify for the Type parameter. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobHyperParameterTuningJobConfigHyperParameterTuningJobObjective { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricName")] @@ -68,8 +121,18 @@ pub struct HyperParameterTuningJobHyperParameterTuningJobConfigHyperParameterTun pub r#type: Option, } -/// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. -/// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. +/// Specifies ranges of integer, continuous, and categorical hyperparameters +/// that a hyperparameter tuning job searches. 
The hyperparameter tuning job +/// launches training jobs with hyperparameter values within these ranges to +/// find the combination of values that result in the training job with the best +/// performance as measured by the objective metric of the hyperparameter tuning +/// job. +/// +/// +/// The maximum number of items specified for Array Members refers to the maximum +/// number of hyperparameters for each range and also the maximum for the hyperparameter +/// tuning job itself. That is, the sum of the number of hyperparameters for +/// all the ranges can't exceed the maximum number specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobHyperParameterTuningJobConfigParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "categoricalParameterRanges")] @@ -102,7 +165,8 @@ pub struct HyperParameterTuningJobHyperParameterTuningJobConfigParameterRangesCo pub scaling_type: Option, } -/// For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches. +/// For a hyperparameter of the integer type, specifies the range that a hyperparameter +/// tuning job searches. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobHyperParameterTuningJobConfigParameterRangesIntegerParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxValue")] @@ -115,7 +179,8 @@ pub struct HyperParameterTuningJobHyperParameterTuningJobConfigParameterRangesIn pub scaling_type: Option, } -/// Specifies the maximum number of training jobs and parallel training jobs that a hyperparameter tuning job can launch. +/// Specifies the maximum number of training jobs and parallel training jobs +/// that a hyperparameter tuning job can launch. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobHyperParameterTuningJobConfigResourceLimits { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxNumberOfTrainingJobs")] @@ -131,9 +196,21 @@ pub struct HyperParameterTuningJobHyperParameterTuningJobConfigTuningJobCompleti pub target_objective_metric_value: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. 
+/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -142,13 +219,18 @@ pub struct HyperParameterTuningJobTags { pub value: Option, } -/// The HyperParameterTrainingJobDefinition object that describes the training jobs that this tuning job launches, including static hyperparameters, input data configuration, output data configuration, resource configuration, and stopping condition. +/// The HyperParameterTrainingJobDefinition object that describes the training +/// jobs that this tuning job launches, including static hyperparameters, input +/// data configuration, output data configuration, resource configuration, and +/// stopping condition. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinition { - /// Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor. + /// Specifies which training algorithm to use for training jobs that a hyperparameter + /// tuning job launches and the metrics to monitor. #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmSpecification")] pub algorithm_specification: Option, - /// Contains information about the output location for managed spot training checkpoint data. + /// Contains information about the output location for managed spot training + /// checkpoint data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "checkpointConfig")] pub checkpoint_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "definitionName")] @@ -159,8 +241,18 @@ pub struct HyperParameterTuningJobTrainingJobDefinition { pub enable_managed_spot_training: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. - /// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. + /// Specifies ranges of integer, continuous, and categorical hyperparameters + /// that a hyperparameter tuning job searches. The hyperparameter tuning job + /// launches training jobs with hyperparameter values within these ranges to + /// find the combination of values that result in the training job with the best + /// performance as measured by the objective metric of the hyperparameter tuning + /// job. 
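Purely as an illustration of how the reflowed descriptions above line up with the generated serde renames, the fragment below parses a small YAML snippet into the HyperParameterTuningJobHyperParameterTuningJobConfig defined above. It is a sketch, not part of the generated file: serde_yaml is assumed to be available, the Option generic parameters elided in this rendering are assumed to be the crate's usual String/i64 forms, and the keys nested under parameterRanges (integerParameterRanges, name, minValue, maxValue) are assumptions based on the SageMaker API rather than lines shown in this hunk. The values (Bayesian, Maximize, validation:accuracy) are examples only.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::hyperparametertuningjobs::HyperParameterTuningJobHyperParameterTuningJobConfig;

fn parse_tuning_config() -> Result<HyperParameterTuningJobHyperParameterTuningJobConfig, serde_yaml::Error> {
    // Top-level keys follow the serde renames shown above (camelCase, as in the CRD).
    let yaml = r#"
strategy: Bayesian
hyperParameterTuningJobObjective:
  metricName: "validation:accuracy"
  type: Maximize
resourceLimits:
  maxNumberOfTrainingJobs: 10
parameterRanges:
  integerParameterRanges:        # inner keys are assumed, not shown in this hunk
    - name: epochs
      minValue: "5"
      maxValue: "20"
"#;
    serde_yaml::from_str(yaml)
}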
+ /// + /// + /// The maximum number of items specified for Array Members refers to the maximum + /// number of hyperparameters for each range and also the maximum for the hyperparameter + /// tuning job itself. That is, the sum of the number of hyperparameters for + /// all the ranges can't exceed the maximum number specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameterRanges")] pub hyper_parameter_ranges: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputDataConfig")] @@ -168,31 +260,61 @@ pub struct HyperParameterTuningJobTrainingJobDefinition { /// Provides information about how to store model training results (model artifacts). #[serde(default, skip_serializing_if = "Option::is_none", rename = "outputDataConfig")] pub output_data_config: Option, - /// Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training. + /// Describes the resources, including machine learning (ML) compute instances + /// and ML storage volumes, to use for model training. #[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceConfig")] pub resource_config: Option, - /// The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job. + /// The retry strategy to use when a training job fails due to an InternalServerError. + /// RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob + /// requests. You can add the StoppingCondition parameter to the request to limit + /// the training time for the complete job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryStrategy")] pub retry_strategy: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleARN")] pub role_arn: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "staticHyperParameters")] pub static_hyper_parameters: Option>, - /// Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. - /// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. - /// The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. - /// The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete. + /// Specifies a limit to how long a model training job or model compilation job + /// can run. It also specifies how long a managed spot training job has to complete. 
+ /// When the job reaches the time limit, SageMaker ends the training or compilation + /// job. Use this API to cap model training costs. + /// + /// + /// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, + /// which delays job termination for 120 seconds. Algorithms can use this 120-second + /// window to save the model artifacts, so the results of training are not lost. + /// + /// + /// The training algorithms provided by SageMaker automatically save the intermediate + /// results of a model training job when possible. This attempt to save artifacts + /// is only a best effort case as model might not be in a state from which it + /// can be saved. For example, if training has just started, the model might + /// not be ready to save. When saved, this intermediate data is a valid model + /// artifact. You can use it to create a model with CreateModel. + /// + /// + /// The Neural Topic Model (NTM) currently does not support saving intermediate + /// model artifacts. When training NTMs, make sure that the maximum runtime is + /// sufficient for the training job to complete. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. + /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter + /// tuning uses the value of this metric to evaluate the training jobs it launches, + /// and returns the training job that results in either the highest or lowest + /// value for this metric, depending on the value you specify for the Type parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tuningObjective")] pub tuning_objective: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor. +/// Specifies which training algorithm to use for training jobs that a hyperparameter +/// tuning job launches and the metrics to monitor. 
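Putting the spec-level pieces together, here is a minimal, self-contained sketch that builds a HyperParameterTuningJobSpec using only the fields shown in this hunk, leaves every optional field unset, and serializes it to JSON so the camelCase renames become visible. The module path comes from the file header above; wrapping the spec into the CustomResource kind and applying it through a kube Api would be the usual next step, but that wrapper is not part of this hunk and is not shown.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::hyperparametertuningjobs::{
    HyperParameterTuningJobHyperParameterTuningJobConfig, HyperParameterTuningJobSpec,
};

fn main() {
    let spec = HyperParameterTuningJobSpec {
        // The two required fields from the struct above; everything optional stays at None.
        hyper_parameter_tuning_job_name: "my-tuning-job".to_string(),
        hyper_parameter_tuning_job_config: HyperParameterTuningJobHyperParameterTuningJobConfig::default(),
        tags: None,
        training_job_definition: None,
        training_job_definitions: None,
        warm_start_config: None,
    };
    // Prints "hyperParameterTuningJobName": "my-tuning-job" and an empty
    // "hyperParameterTuningJobConfig", matching the CRD field names.
    println!("{}", serde_json::to_string_pretty(&spec).unwrap());
}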
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmName")] @@ -201,21 +323,59 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecification { pub metric_definitions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingImage")] pub training_image: Option, - /// The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. - /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. + /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. + /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. 
+ /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingInputMode")] pub training_input_mode: Option, } -/// Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job. +/// Specifies a metric that the training algorithm writes to stderr or stdout. +/// SageMakerhyperparameter tuning captures all defined metrics. You specify +/// one metric that a hyperparameter tuning job uses as its objective metric +/// to choose the best training job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecificationMetricDefinitions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -224,7 +384,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecificationMet pub regex: Option, } -/// Contains information about the output location for managed spot training checkpoint data. +/// Contains information about the output location for managed spot training +/// checkpoint data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionCheckpointConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -233,8 +394,18 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionCheckpointConfig { pub s3_uri: Option, } -/// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. -/// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. +/// Specifies ranges of integer, continuous, and categorical hyperparameters +/// that a hyperparameter tuning job searches. The hyperparameter tuning job +/// launches training jobs with hyperparameter values within these ranges to +/// find the combination of values that result in the training job with the best +/// performance as measured by the objective metric of the hyperparameter tuning +/// job. 
+/// +/// +/// The maximum number of items specified for Array Members refers to the maximum +/// number of hyperparameters for each range and also the maximum for the hyperparameter +/// tuning job itself. That is, the sum of the number of hyperparameters for +/// all the ranges can't exceed the maximum number specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionHyperParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "categoricalParameterRanges")] @@ -267,7 +438,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionHyperParameterRangesConti pub scaling_type: Option, } -/// For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches. +/// For a hyperparameter of the integer type, specifies the range that a hyperparameter +/// tuning job searches. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionHyperParameterRangesIntegerParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxValue")] @@ -292,22 +464,70 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionInputDataConfig { /// Describes the location of the channel data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSource")] pub data_source: Option, - /// The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. - /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. 
+ /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. + /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. + /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputMode")] pub input_mode: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "recordWrapperType")] pub record_wrapper_type: Option, - /// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. - /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. + /// A configuration for a shuffle option for input data in a channel. If you + /// use S3Prefix for S3DataType, the results of the S3 key prefix matches are + /// shuffled. If you use ManifestFile, the order of the S3 object references + /// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order + /// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling + /// order is determined using the Seed value. + /// + /// + /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at + /// the start of every epoch. With large datasets, this ensures that the order + /// of the training data is different for each epoch, and it helps reduce bias + /// and possible overfitting. 
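The Pipe / File / FastFile discussion above ultimately lands in the trainingInputMode field of the algorithm specification defined earlier. A small hedged sketch, assuming the elided Option parameters on these fields are String; the ECR image URI is a placeholder:

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::hyperparametertuningjobs::HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecification;

fn example_algorithm_spec() -> HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecification {
    // The struct derives Default, so only the fields of interest need to be set.
    let mut alg = HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecification::default();
    alg.training_image = Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/my-training:latest".to_string());
    alg.training_input_mode = Some("FastFile".to_string()); // or "Pipe" / "File", per the notes above
    alg
}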
In a multi-node training job when ShuffleConfig + /// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled + /// across nodes so that the content sent to a particular node on the first epoch + /// might be sent to a different node on the second epoch. #[serde(default, skip_serializing_if = "Option::is_none", rename = "shuffleConfig")] pub shuffle_config: Option, } @@ -351,8 +571,21 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionInputDataConfigDataSource pub s3_uri: Option, } -/// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. -/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. +/// A configuration for a shuffle option for input data in a channel. If you +/// use S3Prefix for S3DataType, the results of the S3 key prefix matches are +/// shuffled. If you use ManifestFile, the order of the S3 object references +/// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order +/// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling +/// order is determined using the Seed value. +/// +/// +/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at +/// the start of every epoch. With large datasets, this ensures that the order +/// of the training data is different for each epoch, and it helps reduce bias +/// and possible overfitting. In a multi-node training job when ShuffleConfig +/// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled +/// across nodes so that the content sent to a particular node on the first epoch +/// might be sent to a different node on the second epoch. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionInputDataConfigShuffleConfig { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -368,7 +601,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionOutputDataConfig { pub s3_output_path: Option, } -/// Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training. +/// Describes the resources, including machine learning (ML) compute instances +/// and ML storage volumes, to use for model training. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionResourceConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -385,7 +619,9 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionResourceConfig { pub volume_size_in_gb: Option, } -/// Defines an instance group for heterogeneous cluster training. 
When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups . +/// Defines an instance group for heterogeneous cluster training. When requesting +/// a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) +/// API, you can configure multiple instance groups . #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionResourceConfigInstanceGroups { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -396,17 +632,38 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionResourceConfigInstanceGro pub instance_type: Option, } -/// The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job. +/// The retry strategy to use when a training job fails due to an InternalServerError. +/// RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob +/// requests. You can add the StoppingCondition parameter to the request to limit +/// the training time for the complete job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionRetryStrategy { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maximumRetryAttempts")] pub maximum_retry_attempts: Option, } -/// Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. -/// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. -/// The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. -/// The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete. +/// Specifies a limit to how long a model training job or model compilation job +/// can run. It also specifies how long a managed spot training job has to complete. +/// When the job reaches the time limit, SageMaker ends the training or compilation +/// job. Use this API to cap model training costs. +/// +/// +/// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, +/// which delays job termination for 120 seconds. Algorithms can use this 120-second +/// window to save the model artifacts, so the results of training are not lost. 
+/// +/// +/// The training algorithms provided by SageMaker automatically save the intermediate +/// results of a model training job when possible. This attempt to save artifacts +/// is only a best effort case as model might not be in a state from which it +/// can be saved. For example, if training has just started, the model might +/// not be ready to save. When saved, this intermediate data is a valid model +/// artifact. You can use it to create a model with CreateModel. +/// +/// +/// The Neural Topic Model (NTM) currently does not support saving intermediate +/// model artifacts. When training NTMs, make sure that the maximum runtime is +/// sufficient for the training job to complete. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionStoppingCondition { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRuntimeInSeconds")] @@ -415,7 +672,10 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionStoppingCondition { pub max_wait_time_in_seconds: Option, } -/// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. +/// Defines the objective metric for a hyperparameter tuning job. Hyperparameter +/// tuning uses the value of this metric to evaluate the training jobs it launches, +/// and returns the training job that results in either the highest or lowest +/// value for this metric, depending on the value you specify for the Type parameter. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionTuningObjective { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricName")] @@ -424,7 +684,11 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionTuningObjective { pub r#type: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -436,10 +700,12 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionVpcConfig { /// Defines the training jobs launched by a hyperparameter tuning job. 
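To make the resource and stopping settings above concrete, the sketch below fills in a small training footprint: one instance with a 30 GB ML storage volume, a one hour runtime cap, and up to two automatic retries on InternalServerError. It is a sketch only; the structs derive Default as shown, but the Option generic parameters are elided in this rendering and are assumed to be i64 here, and the snake_case names for the renamed fields (instanceCount, maxRuntimeInSeconds) are inferred from the crate's conventions rather than shown in this hunk.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::hyperparametertuningjobs::{
    HyperParameterTuningJobTrainingJobDefinitionResourceConfig as ResourceConfig,
    HyperParameterTuningJobTrainingJobDefinitionRetryStrategy as RetryStrategy,
    HyperParameterTuningJobTrainingJobDefinitionStoppingCondition as StoppingCondition,
};

fn example_limits() -> (ResourceConfig, StoppingCondition, RetryStrategy) {
    let mut resources = ResourceConfig::default();
    resources.instance_count = Some(1);     // serialized as "instanceCount"
    resources.volume_size_in_gb = Some(30); // ML storage volume size, in GB

    let mut stopping = StoppingCondition::default();
    stopping.max_runtime_in_seconds = Some(3600); // after this, SageMaker sends SIGTERM with a 120-second window

    let mut retry = RetryStrategy::default();
    retry.maximum_retry_attempts = Some(2); // retries apply only to InternalServerError failures

    (resources, stopping, retry)
}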
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitions { - /// Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor. + /// Specifies which training algorithm to use for training jobs that a hyperparameter + /// tuning job launches and the metrics to monitor. #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmSpecification")] pub algorithm_specification: Option, - /// Contains information about the output location for managed spot training checkpoint data. + /// Contains information about the output location for managed spot training + /// checkpoint data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "checkpointConfig")] pub checkpoint_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "definitionName")] @@ -450,8 +716,18 @@ pub struct HyperParameterTuningJobTrainingJobDefinitions { pub enable_managed_spot_training: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. - /// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. + /// Specifies ranges of integer, continuous, and categorical hyperparameters + /// that a hyperparameter tuning job searches. The hyperparameter tuning job + /// launches training jobs with hyperparameter values within these ranges to + /// find the combination of values that result in the training job with the best + /// performance as measured by the objective metric of the hyperparameter tuning + /// job. + /// + /// + /// The maximum number of items specified for Array Members refers to the maximum + /// number of hyperparameters for each range and also the maximum for the hyperparameter + /// tuning job itself. That is, the sum of the number of hyperparameters for + /// all the ranges can't exceed the maximum number specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameterRanges")] pub hyper_parameter_ranges: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputDataConfig")] @@ -459,31 +735,61 @@ pub struct HyperParameterTuningJobTrainingJobDefinitions { /// Provides information about how to store model training results (model artifacts). #[serde(default, skip_serializing_if = "Option::is_none", rename = "outputDataConfig")] pub output_data_config: Option, - /// Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training. + /// Describes the resources, including machine learning (ML) compute instances + /// and ML storage volumes, to use for model training. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "resourceConfig")] pub resource_config: Option, - /// The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job. + /// The retry strategy to use when a training job fails due to an InternalServerError. + /// RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob + /// requests. You can add the StoppingCondition parameter to the request to limit + /// the training time for the complete job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryStrategy")] pub retry_strategy: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleARN")] pub role_arn: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "staticHyperParameters")] pub static_hyper_parameters: Option>, - /// Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. - /// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. - /// The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. - /// The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete. + /// Specifies a limit to how long a model training job or model compilation job + /// can run. It also specifies how long a managed spot training job has to complete. + /// When the job reaches the time limit, SageMaker ends the training or compilation + /// job. Use this API to cap model training costs. + /// + /// + /// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, + /// which delays job termination for 120 seconds. Algorithms can use this 120-second + /// window to save the model artifacts, so the results of training are not lost. + /// + /// + /// The training algorithms provided by SageMaker automatically save the intermediate + /// results of a model training job when possible. This attempt to save artifacts + /// is only a best effort case as model might not be in a state from which it + /// can be saved. For example, if training has just started, the model might + /// not be ready to save. When saved, this intermediate data is a valid model + /// artifact. You can use it to create a model with CreateModel. + /// + /// + /// The Neural Topic Model (NTM) currently does not support saving intermediate + /// model artifacts. When training NTMs, make sure that the maximum runtime is + /// sufficient for the training job to complete. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. + /// Defines the objective metric for a hyperparameter tuning job. Hyperparameter + /// tuning uses the value of this metric to evaluate the training jobs it launches, + /// and returns the training job that results in either the highest or lowest + /// value for this metric, depending on the value you specify for the Type parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tuningObjective")] pub tuning_objective: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies which training algorithm to use for training jobs that a hyperparameter tuning job launches and the metrics to monitor. +/// Specifies which training algorithm to use for training jobs that a hyperparameter +/// tuning job launches and the metrics to monitor. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsAlgorithmSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmName")] @@ -492,21 +798,59 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsAlgorithmSpecification { pub metric_definitions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingImage")] pub training_image: Option, - /// The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. 
- /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. + /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. + /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. + /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingInputMode")] pub training_input_mode: Option, } -/// Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job. +/// Specifies a metric that the training algorithm writes to stderr or stdout. +/// SageMakerhyperparameter tuning captures all defined metrics. You specify +/// one metric that a hyperparameter tuning job uses as its objective metric +/// to choose the best training job. 
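The metric definitions described here are what connect the tuning objective to the algorithm's log output: each entry pairs a metric name with a regex that SageMaker applies to stderr/stdout. A hedged sketch using the TrainingJobDefinition variant defined earlier (the plural ...Definitions variant that follows has the same shape), assuming the struct's only fields are the truncated `name` and the `regex` shown, both Option<String>; the metric name and regex are illustrative:

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::hyperparametertuningjobs::HyperParameterTuningJobTrainingJobDefinitionAlgorithmSpecificationMetricDefinitions as MetricDefinition;

fn example_metric() -> MetricDefinition {
    MetricDefinition {
        // The tuning objective's metricName would reference this name.
        name: Some("validation:accuracy".to_string()),
        // Captures the numeric value from a log line such as "validation:accuracy=0.92".
        regex: Some(r"validation:accuracy=([0-9\.]+)".to_string()),
    }
}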
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsAlgorithmSpecificationMetricDefinitions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -515,7 +859,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsAlgorithmSpecificationMe pub regex: Option, } -/// Contains information about the output location for managed spot training checkpoint data. +/// Contains information about the output location for managed spot training +/// checkpoint data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsCheckpointConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -524,8 +869,18 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsCheckpointConfig { pub s3_uri: Option, } -/// Specifies ranges of integer, continuous, and categorical hyperparameters that a hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs with hyperparameter values within these ranges to find the combination of values that result in the training job with the best performance as measured by the objective metric of the hyperparameter tuning job. -/// The maximum number of items specified for Array Members refers to the maximum number of hyperparameters for each range and also the maximum for the hyperparameter tuning job itself. That is, the sum of the number of hyperparameters for all the ranges can't exceed the maximum number specified. +/// Specifies ranges of integer, continuous, and categorical hyperparameters +/// that a hyperparameter tuning job searches. The hyperparameter tuning job +/// launches training jobs with hyperparameter values within these ranges to +/// find the combination of values that result in the training job with the best +/// performance as measured by the objective metric of the hyperparameter tuning +/// job. +/// +/// +/// The maximum number of items specified for Array Members refers to the maximum +/// number of hyperparameters for each range and also the maximum for the hyperparameter +/// tuning job itself. That is, the sum of the number of hyperparameters for +/// all the ranges can't exceed the maximum number specified. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsHyperParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "categoricalParameterRanges")] @@ -558,7 +913,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsHyperParameterRangesCont pub scaling_type: Option, } -/// For a hyperparameter of the integer type, specifies the range that a hyperparameter tuning job searches. +/// For a hyperparameter of the integer type, specifies the range that a hyperparameter +/// tuning job searches. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsHyperParameterRangesIntegerParameterRanges { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxValue")] @@ -583,22 +939,70 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsInputDataConfig { /// Describes the location of the channel data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSource")] pub data_source: Option, - /// The training input mode that the algorithm supports. 
For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. - /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. + /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. + /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. + /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. 
The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputMode")] pub input_mode: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "recordWrapperType")] pub record_wrapper_type: Option, - /// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. - /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. + /// A configuration for a shuffle option for input data in a channel. If you + /// use S3Prefix for S3DataType, the results of the S3 key prefix matches are + /// shuffled. If you use ManifestFile, the order of the S3 object references + /// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order + /// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling + /// order is determined using the Seed value. + /// + /// + /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at + /// the start of every epoch. With large datasets, this ensures that the order + /// of the training data is different for each epoch, and it helps reduce bias + /// and possible overfitting. In a multi-node training job when ShuffleConfig + /// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled + /// across nodes so that the content sent to a particular node on the first epoch + /// might be sent to a different node on the second epoch. #[serde(default, skip_serializing_if = "Option::is_none", rename = "shuffleConfig")] pub shuffle_config: Option, } @@ -642,8 +1046,21 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsInputDataConfigDataSourc pub s3_uri: Option, } -/// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. -/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. +/// A configuration for a shuffle option for input data in a channel. 
If you +/// use S3Prefix for S3DataType, the results of the S3 key prefix matches are +/// shuffled. If you use ManifestFile, the order of the S3 object references +/// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order +/// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling +/// order is determined using the Seed value. +/// +/// +/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at +/// the start of every epoch. With large datasets, this ensures that the order +/// of the training data is different for each epoch, and it helps reduce bias +/// and possible overfitting. In a multi-node training job when ShuffleConfig +/// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled +/// across nodes so that the content sent to a particular node on the first epoch +/// might be sent to a different node on the second epoch. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsInputDataConfigShuffleConfig { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -659,7 +1076,8 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsOutputDataConfig { pub s3_output_path: Option, } -/// Describes the resources, including machine learning (ML) compute instances and ML storage volumes, to use for model training. +/// Describes the resources, including machine learning (ML) compute instances +/// and ML storage volumes, to use for model training. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsResourceConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -676,7 +1094,9 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsResourceConfig { pub volume_size_in_gb: Option, } -/// Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups . +/// Defines an instance group for heterogeneous cluster training. When requesting +/// a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) +/// API, you can configure multiple instance groups . #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsResourceConfigInstanceGroups { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -687,17 +1107,38 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsResourceConfigInstanceGr pub instance_type: Option, } -/// The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job. +/// The retry strategy to use when a training job fails due to an InternalServerError. +/// RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob +/// requests. You can add the StoppingCondition parameter to the request to limit +/// the training time for the complete job. 
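The ShuffleConfig documentation reflowed just above states that the shuffling order is determined by the Seed value, which is what makes per-epoch shuffles reproducible across runs and nodes. The sketch below is illustrative only and uses the `rand` crate rather than anything SageMaker-specific; it simply demonstrates the property being relied on: the same seed always yields the same permutation.

```rust
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::SeedableRng;

fn shuffled(seed: u64) -> Vec<u32> {
    // Hypothetical "channel data": object indices to be shuffled.
    let mut objects: Vec<u32> = (0..10).collect();
    let mut rng = StdRng::seed_from_u64(seed);
    objects.shuffle(&mut rng);
    objects
}

fn main() {
    // Deterministic for a fixed seed; a different seed gives a different order.
    assert_eq!(shuffled(42), shuffled(42));
    println!("seed 42 -> {:?}", shuffled(42));
    println!("seed 43 -> {:?}", shuffled(43));
}
```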
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsRetryStrategy { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maximumRetryAttempts")] pub maximum_retry_attempts: Option, } -/// Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs. -/// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. -/// The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best effort case as model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel. -/// The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete. +/// Specifies a limit to how long a model training job or model compilation job +/// can run. It also specifies how long a managed spot training job has to complete. +/// When the job reaches the time limit, SageMaker ends the training or compilation +/// job. Use this API to cap model training costs. +/// +/// +/// To stop a training job, SageMaker sends the algorithm the SIGTERM signal, +/// which delays job termination for 120 seconds. Algorithms can use this 120-second +/// window to save the model artifacts, so the results of training are not lost. +/// +/// +/// The training algorithms provided by SageMaker automatically save the intermediate +/// results of a model training job when possible. This attempt to save artifacts +/// is only a best effort case as model might not be in a state from which it +/// can be saved. For example, if training has just started, the model might +/// not be ready to save. When saved, this intermediate data is a valid model +/// artifact. You can use it to create a model with CreateModel. +/// +/// +/// The Neural Topic Model (NTM) currently does not support saving intermediate +/// model artifacts. When training NTMs, make sure that the maximum runtime is +/// sufficient for the training job to complete. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsStoppingCondition { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRuntimeInSeconds")] @@ -706,7 +1147,10 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsStoppingCondition { pub max_wait_time_in_seconds: Option, } -/// Defines the objective metric for a hyperparameter tuning job. Hyperparameter tuning uses the value of this metric to evaluate the training jobs it launches, and returns the training job that results in either the highest or lowest value for this metric, depending on the value you specify for the Type parameter. +/// Defines the objective metric for a hyperparameter tuning job. 
Hyperparameter +/// tuning uses the value of this metric to evaluate the training jobs it launches, +/// and returns the training job that results in either the highest or lowest +/// value for this metric, depending on the value you specify for the Type parameter. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsTuningObjective { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricName")] @@ -715,7 +1159,11 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsTuningObjective { pub r#type: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobTrainingJobDefinitionsVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -724,9 +1172,24 @@ pub struct HyperParameterTuningJobTrainingJobDefinitionsVpcConfig { pub subnets: Option>, } -/// Specifies the configuration for starting the hyperparameter tuning job using one or more previous tuning jobs as a starting point. The results of previous tuning jobs are used to inform which combinations of hyperparameters to search over in the new tuning job. -/// All training jobs launched by the new hyperparameter tuning job are evaluated by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType value for the warm start configuration, the training job that performs the best in the new tuning job is compared to the best training jobs from the parent tuning jobs. From these, the training job that performs the best as measured by the objective metric is returned as the overall best training job. -/// All training jobs launched by parent hyperparameter tuning jobs and the new hyperparameter tuning jobs count against the limit of training jobs for the tuning job. +/// Specifies the configuration for starting the hyperparameter tuning job using +/// one or more previous tuning jobs as a starting point. The results of previous +/// tuning jobs are used to inform which combinations of hyperparameters to search +/// over in the new tuning job. +/// +/// +/// All training jobs launched by the new hyperparameter tuning job are evaluated +/// by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM +/// as the WarmStartType value for the warm start configuration, the training +/// job that performs the best in the new tuning job is compared to the best +/// training jobs from the parent tuning jobs. 
From these, the training job that +/// performs the best as measured by the objective metric is returned as the +/// overall best training job. +/// +/// +/// All training jobs launched by parent hyperparameter tuning jobs and the new +/// hyperparameter tuning jobs count against the limit of training jobs for the +/// tuning job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobWarmStartConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "parentHyperParameterTuningJobs")] @@ -735,7 +1198,8 @@ pub struct HyperParameterTuningJobWarmStartConfig { pub warm_start_type: Option, } -/// A previously completed or stopped hyperparameter tuning job to be used as a starting point for a new hyperparameter tuning job. +/// A previously completed or stopped hyperparameter tuning job to be used as +/// a starting point for a new hyperparameter tuning job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobWarmStartConfigParentHyperParameterTuningJobs { #[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameterTuningJobName")] @@ -745,47 +1209,70 @@ pub struct HyperParameterTuningJobWarmStartConfigParentHyperParameterTuningJobs /// HyperParameterTuningJobStatus defines the observed state of HyperParameterTuningJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// A TrainingJobSummary object that describes the training job that completed with the best current HyperParameterTuningJobObjective. + /// A TrainingJobSummary object that describes the training job that completed + /// with the best current HyperParameterTuningJobObjective. #[serde(default, skip_serializing_if = "Option::is_none", rename = "bestTrainingJob")] pub best_training_job: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// If the tuning job failed, the reason it failed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, - /// The status of the tuning job: InProgress, Completed, Failed, Stopping, or Stopped. + /// The status of the tuning job: InProgress, Completed, Failed, Stopping, or + /// Stopped. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameterTuningJobStatus")] pub hyper_parameter_tuning_job_status: Option, - /// If the hyperparameter tuning job is an warm start tuning job with a WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training job with the best objective metric value of all training jobs launched by this tuning job and all parent jobs specified for the warm start tuning job. + /// If the hyperparameter tuning job is an warm start tuning job with a WarmStartType + /// of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training + /// job with the best objective metric value of all training jobs launched by + /// this tuning job and all parent jobs specified for the warm start tuning job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "overallBestTrainingJob")] pub overall_best_training_job: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// A TrainingJobSummary object that describes the training job that completed with the best current HyperParameterTuningJobObjective. +/// A TrainingJobSummary object that describes the training job that completed +/// with the best current HyperParameterTuningJobObjective. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusBestTrainingJob { #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationTime")] pub creation_time: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, - /// Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig. + /// Shows the latest objective metric emitted by a training job that was launched + /// by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective + /// parameter of HyperParameterTuningJobConfig. #[serde(default, skip_serializing_if = "Option::is_none", rename = "finalHyperParameterTuningJobObjectiveMetric")] pub final_hyper_parameter_tuning_job_objective_metric: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectiveStatus")] @@ -808,7 +1295,9 @@ pub struct HyperParameterTuningJobStatusBestTrainingJob { pub tuning_job_name: Option, } -/// Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig. +/// Shows the latest objective metric emitted by a training job that was launched +/// by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective +/// parameter of HyperParameterTuningJobConfig. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusBestTrainingJobFinalHyperParameterTuningJobObjectiveMetric { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricName")] @@ -819,7 +1308,9 @@ pub struct HyperParameterTuningJobStatusBestTrainingJobFinalHyperParameterTuning pub value: Option, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusConditions { /// Last time the condition transitioned from one status to another. @@ -838,14 +1329,19 @@ pub struct HyperParameterTuningJobStatusConditions { pub r#type: String, } -/// If the hyperparameter tuning job is an warm start tuning job with a WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training job with the best objective metric value of all training jobs launched by this tuning job and all parent jobs specified for the warm start tuning job. +/// If the hyperparameter tuning job is an warm start tuning job with a WarmStartType +/// of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training +/// job with the best objective metric value of all training jobs launched by +/// this tuning job and all parent jobs specified for the warm start tuning job. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusOverallBestTrainingJob { #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationTime")] pub creation_time: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, - /// Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig. + /// Shows the latest objective metric emitted by a training job that was launched + /// by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective + /// parameter of HyperParameterTuningJobConfig. #[serde(default, skip_serializing_if = "Option::is_none", rename = "finalHyperParameterTuningJobObjectiveMetric")] pub final_hyper_parameter_tuning_job_objective_metric: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "objectiveStatus")] @@ -868,7 +1364,9 @@ pub struct HyperParameterTuningJobStatusOverallBestTrainingJob { pub tuning_job_name: Option, } -/// Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig. +/// Shows the latest objective metric emitted by a training job that was launched +/// by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective +/// parameter of HyperParameterTuningJobConfig. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct HyperParameterTuningJobStatusOverallBestTrainingJobFinalHyperParameterTuningJobObjectiveMetric { #[serde(default, skip_serializing_if = "Option::is_none", rename = "metricName")] diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelbiasjobdefinitions.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelbiasjobdefinitions.rs index 76911e7b6..bc2d9d9f7 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelbiasjobdefinitions.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelbiasjobdefinitions.rs @@ -13,7 +13,8 @@ use std::collections::BTreeMap; #[kube(status = "ModelBiasJobDefinitionStatus")] #[kube(schema = "disabled")] pub struct ModelBiasJobDefinitionSpec { - /// The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account. + /// The name of the bias job definition. The name must be unique within an Amazon + /// Web Services Region in the Amazon Web Services account. #[serde(rename = "jobDefinitionName")] pub job_definition_name: String, /// Identifies the resources to deploy for a monitoring job. @@ -34,13 +35,16 @@ pub struct ModelBiasJobDefinitionSpec { /// Networking options for a model bias job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + /// to perform tasks on your behalf. 
#[serde(rename = "roleARN")] pub role_arn: String, /// A time limit for how long the monitoring job is allowed to run before stopping. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -149,12 +153,14 @@ pub struct ModelBiasJobDefinitionModelBiasJobOutputConfig { /// The output object for a monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionModelBiasJobOutputConfigMonitoringOutputs { - /// Information about where and how you want to store the results of a monitoring job. + /// Information about where and how you want to store the results of a monitoring + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } -/// Information about where and how you want to store the results of a monitoring job. +/// Information about where and how you want to store the results of a monitoring +/// job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionModelBiasJobOutputConfigMonitoringOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -172,12 +178,20 @@ pub struct ModelBiasJobDefinitionNetworkConfig { pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -193,9 +207,21 @@ pub struct ModelBiasJobDefinitionStoppingCondition { pub max_runtime_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
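A quick illustration of the serde attributes this patch keeps repeating (`rename = "securityGroupIDs"`, `skip_serializing_if = "Option::is_none"`): the Rust fields are snake_case, the wire format keeps the CRD's original casing, and unset optionals are omitted from the output. The struct below is a stand-in with assumed `Option<Vec<String>>` types, not the generated VpcConfig itself, and it assumes `serde` and `serde_json` as dependencies.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
struct VpcConfigSketch {
    #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")]
    security_group_ids: Option<Vec<String>>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    subnets: Option<Vec<String>>,
}

fn main() {
    let cfg = VpcConfigSketch {
        security_group_ids: Some(vec!["sg-0123456789abcdef0".to_string()]),
        subnets: None, // omitted on serialization because of skip_serializing_if
    };
    // Prints: {"securityGroupIDs":["sg-0123456789abcdef0"]}
    println!("{}", serde_json::to_string(&cfg).unwrap());
}
```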
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -207,28 +233,45 @@ pub struct ModelBiasJobDefinitionTags { /// ModelBiasJobDefinitionStatus defines the observed state of ModelBiasJobDefinition #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelBiasJobDefinitionStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelexplainabilityjobdefinitions.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelexplainabilityjobdefinitions.rs index e0ed0c9ee..e28bdd2fd 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelexplainabilityjobdefinitions.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelexplainabilityjobdefinitions.rs @@ -13,13 +13,15 @@ use std::collections::BTreeMap; #[kube(status = "ModelExplainabilityJobDefinitionStatus")] #[kube(schema = "disabled")] pub struct ModelExplainabilityJobDefinitionSpec { - /// The name of the model explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account. + /// The name of the model explainability job definition. The name must be unique + /// within an Amazon Web Services Region in the Amazon Web Services account. #[serde(rename = "jobDefinitionName")] pub job_definition_name: String, /// Identifies the resources to deploy for a monitoring job. #[serde(rename = "jobResources")] pub job_resources: ModelExplainabilityJobDefinitionJobResources, - /// Configures the model explainability job to run a specified Docker container image. + /// Configures the model explainability job to run a specified Docker container + /// image. #[serde(rename = "modelExplainabilityAppSpecification")] pub model_explainability_app_specification: ModelExplainabilityJobDefinitionModelExplainabilityAppSpecification, /// The baseline configuration for a model explainability job. @@ -34,13 +36,16 @@ pub struct ModelExplainabilityJobDefinitionSpec { /// Networking options for a model explainability job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + /// to perform tasks on your behalf. #[serde(rename = "roleARN")] pub role_arn: String, /// A time limit for how long the monitoring job is allowed to run before stopping. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -66,7 +71,8 @@ pub struct ModelExplainabilityJobDefinitionJobResourcesClusterConfig { pub volume_size_in_gb: Option, } -/// Configures the model explainability job to run a specified Docker container image. +/// Configures the model explainability job to run a specified Docker container +/// image. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionModelExplainabilityAppSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "configURI")] @@ -139,12 +145,14 @@ pub struct ModelExplainabilityJobDefinitionModelExplainabilityJobOutputConfig { /// The output object for a monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionModelExplainabilityJobOutputConfigMonitoringOutputs { - /// Information about where and how you want to store the results of a monitoring job. + /// Information about where and how you want to store the results of a monitoring + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } -/// Information about where and how you want to store the results of a monitoring job. +/// Information about where and how you want to store the results of a monitoring +/// job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionModelExplainabilityJobOutputConfigMonitoringOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -162,12 +170,20 @@ pub struct ModelExplainabilityJobDefinitionNetworkConfig { pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. 
+/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -183,9 +199,21 @@ pub struct ModelExplainabilityJobDefinitionStoppingCondition { pub max_runtime_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
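The tag documentation above describes each tag as a key with an optional value. As a small illustration only (a hypothetical local `Tag` type, not the generated ModelExplainabilityJobDefinitionTags), the sketch below builds such a list from a `BTreeMap`, which is how key/value metadata often starts out on the Kubernetes side.

```rust
use std::collections::BTreeMap;

#[derive(Debug, Clone, Default, PartialEq)]
struct Tag {
    key: Option<String>,
    value: Option<String>,
}

/// Turn a plain key/value map into the list-of-tag-objects shape the AWS API expects.
fn to_tags(map: &BTreeMap<String, String>) -> Vec<Tag> {
    map.iter()
        .map(|(k, v)| Tag {
            key: Some(k.clone()),
            value: Some(v.clone()),
        })
        .collect()
}

fn main() {
    let mut labels = BTreeMap::new();
    labels.insert("team".to_string(), "ml-platform".to_string());
    labels.insert("cost-center".to_string(), "1234".to_string());
    println!("{:?}", to_tags(&labels));
}
```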
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -197,28 +225,45 @@ pub struct ModelExplainabilityJobDefinitionTags { /// ModelExplainabilityJobDefinitionStatus defines the observed state of ModelExplainabilityJobDefinition #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. 
#[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelExplainabilityJobDefinitionStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackagegroups.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackagegroups.rs index e127b41b1..3c0549cb3 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackagegroups.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackagegroups.rs @@ -5,8 +5,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; -/// ModelPackageGroupSpec defines the desired state of ModelPackageGroup. -/// A group of versioned models in the model registry. +/// ModelPackageGroupSpec defines the desired state of ModelPackageGroup. +/// +/// +/// A group of versioned models in the model registry. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "ModelPackageGroup", plural = "modelpackagegroups")] #[kube(namespaced)] @@ -19,14 +21,28 @@ pub struct ModelPackageGroupSpec { /// The name of the model group. #[serde(rename = "modelPackageGroupName")] pub model_package_group_name: String, - /// A list of key value pairs associated with the model group. For more information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon Web Services General Reference Guide. + /// A list of key value pairs associated with the model group. For more information, + /// see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + /// in the Amazon Web Services General Reference Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. 
+/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageGroupTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -38,10 +54,15 @@ pub struct ModelPackageGroupTags { /// ModelPackageGroupStatus defines the observed state of ModelPackageGroup #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageGroupStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The status of the model group. @@ -49,20 +70,32 @@ pub struct ModelPackageGroupStatus { pub model_package_group_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageGroupStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageGroupStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackages.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackages.rs index fa46f825d..a11ca93a8 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackages.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelpackages.rs @@ -6,22 +6,30 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// ModelPackageSpec defines the desired state of ModelPackage. -/// A versioned model that can be deployed for SageMaker inference. +/// ModelPackageSpec defines the desired state of ModelPackage. +/// +/// +/// A versioned model that can be deployed for SageMaker inference. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "ModelPackage", plural = "modelpackages")] #[kube(namespaced)] #[kube(status = "ModelPackageStatus")] #[kube(schema = "disabled")] pub struct ModelPackageSpec { - /// An array of additional Inference Specification objects. Each additional Inference Specification specifies artifacts based on this model package that can be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts. + /// An array of additional Inference Specification objects. Each additional Inference + /// Specification specifies artifacts based on this model package that can be + /// used on inference endpoints. Generally used with SageMaker Neo to store the + /// compiled artifacts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalInferenceSpecifications")] pub additional_inference_specifications: Option>, /// A description for the approval status of the model. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "approvalDescription")] pub approval_description: Option, - /// Whether to certify the model package for listing on Amazon Web Services Marketplace. - /// This parameter is optional for unversioned models, and does not apply to versioned models. + /// Whether to certify the model package for listing on Amazon Web Services Marketplace. + /// + /// + /// This parameter is optional for unversioned models, and does not apply to + /// versioned models. #[serde(default, skip_serializing_if = "Option::is_none", rename = "certifyForMarketplace")] pub certify_for_marketplace: Option, /// A unique token that guarantees that the call to this API is idempotent. @@ -30,24 +38,44 @@ pub struct ModelPackageSpec { /// The metadata properties associated with the model package versions. #[serde(default, skip_serializing_if = "Option::is_none", rename = "customerMetadataProperties")] pub customer_metadata_properties: Option>, - /// The machine learning domain of your model package and its components. Common machine learning domains include computer vision and natural language processing. + /// The machine learning domain of your model package and its components. Common + /// machine learning domains include computer vision and natural language processing. #[serde(default, skip_serializing_if = "Option::is_none")] pub domain: Option, - /// Represents the drift check baselines that can be used when the model monitor is set using the model package. For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection) in the Amazon SageMaker Developer Guide. + /// Represents the drift check baselines that can be used when the model monitor + /// is set using the model package. For more information, see the topic on Drift + /// Detection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection) + /// in the Amazon SageMaker Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "driftCheckBaselines")] pub drift_check_baselines: Option, - /// Specifies details about inference jobs that can be run with models based on this model package, including the following: - /// * The Amazon ECR paths of containers that contain the inference code and model artifacts. - /// * The instance types that the model package supports for transform jobs and real-time endpoints used for inference. - /// * The input and output content formats that the model package supports for inference. + /// Specifies details about inference jobs that can be run with models based + /// on this model package, including the following: + /// + /// + /// * The Amazon ECR paths of containers that contain the inference code and + /// model artifacts. + /// + /// + /// * The instance types that the model package supports for transform jobs + /// and real-time endpoints used for inference. + /// + /// + /// * The input and output content formats that the model package supports + /// for inference. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inferenceSpecification")] pub inference_specification: Option, /// Metadata properties of the tracking entity, trial, or trial component. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "metadataProperties")] pub metadata_properties: Option, - /// Whether the model is approved for deployment. - /// This parameter is optional for versioned models, and does not apply to unversioned models. - /// For versioned models, the value of this parameter must be set to Approved to deploy the model. + /// Whether the model is approved for deployment. + /// + /// + /// This parameter is optional for versioned models, and does not apply to unversioned + /// models. + /// + /// + /// For versioned models, the value of this parameter must be set to Approved + /// to deploy the model. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelApprovalStatus")] pub model_approval_status: Option, /// A structure that contains model metrics reports. @@ -56,33 +84,57 @@ pub struct ModelPackageSpec { /// A description of the model package. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelPackageDescription")] pub model_package_description: Option, - /// The name or Amazon Resource Name (ARN) of the model package group that this model version belongs to. - /// This parameter is required for versioned models, and does not apply to unversioned models. + /// The name or Amazon Resource Name (ARN) of the model package group that this + /// model version belongs to. + /// + /// + /// This parameter is required for versioned models, and does not apply to unversioned + /// models. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelPackageGroupName")] pub model_package_group_name: Option, - /// The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen). - /// This parameter is required for unversioned models. It is not applicable to versioned models. + /// The name of the model package. The name must have 1 to 63 characters. Valid + /// characters are a-z, A-Z, 0-9, and - (hyphen). + /// + /// + /// This parameter is required for unversioned models. It is not applicable to + /// versioned models. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelPackageName")] pub model_package_name: Option, - /// The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpoint.html#API_runtime_InvokeEndpoint_RequestSyntax) call. + /// The Amazon Simple Storage Service (Amazon S3) path where the sample payload + /// is stored. This path must point to a single gzip compressed tar archive (.tar.gz + /// suffix). This archive can hold multiple files that are all equally used in + /// the load test. Each file in the archive must satisfy the size constraints + /// of the InvokeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_runtime_InvokeEndpoint.html#API_runtime_InvokeEndpoint_RequestSyntax) + /// call. #[serde(default, skip_serializing_if = "Option::is_none", rename = "samplePayloadURL")] pub sample_payload_url: Option, /// Details about the algorithm that was used to create the model package. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "sourceAlgorithmSpecification")] pub source_algorithm_specification: Option, - /// A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon Web Services General Reference Guide. + /// A list of key value pairs associated with the model. For more information, + /// see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + /// in the Amazon Web Services General Reference Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The machine learning task your model package accomplishes. Common machine learning tasks include object detection and image classification. The following tasks are supported by Inference Recommender: "IMAGE_CLASSIFICATION" | "OBJECT_DETECTION" | "TEXT_GENERATION" |"IMAGE_SEGMENTATION" | "FILL_MASK" | "CLASSIFICATION" | "REGRESSION" | "OTHER". - /// Specify "OTHER" if none of the tasks listed fit your use case. + /// The machine learning task your model package accomplishes. Common machine + /// learning tasks include object detection and image classification. The following + /// tasks are supported by Inference Recommender: "IMAGE_CLASSIFICATION" | "OBJECT_DETECTION" + /// | "TEXT_GENERATION" |"IMAGE_SEGMENTATION" | "FILL_MASK" | "CLASSIFICATION" + /// | "REGRESSION" | "OTHER". + /// + /// + /// Specify "OTHER" if none of the tasks listed fit your use case. #[serde(default, skip_serializing_if = "Option::is_none")] pub task: Option, - /// Specifies configurations for one or more transform jobs that SageMaker runs to test the model package. + /// Specifies configurations for one or more transform jobs that SageMaker runs + /// to test the model package. #[serde(default, skip_serializing_if = "Option::is_none", rename = "validationSpecification")] pub validation_specification: Option, } -/// A structure of additional Inference Specification. Additional Inference Specification specifies details about inference jobs that can be run with models based on this model package +/// A structure of additional Inference Specification. Additional Inference Specification +/// specifies details about inference jobs that can be run with models based +/// on this model package #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageAdditionalInferenceSpecifications { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -134,24 +186,32 @@ pub struct ModelPackageAdditionalInferenceSpecificationsContainersModelInput { pub data_input_config: Option, } -/// Represents the drift check baselines that can be used when the model monitor is set using the model package. For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection) in the Amazon SageMaker Developer Guide. +/// Represents the drift check baselines that can be used when the model monitor +/// is set using the model package. 
For more information, see the topic on Drift +/// Detection against Previous Baselines in SageMaker Pipelines (https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-quality-clarify-baseline-lifecycle.html#pipelines-quality-clarify-baseline-drift-detection) +/// in the Amazon SageMaker Developer Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageDriftCheckBaselines { - /// Represents the drift check bias baselines that can be used when the model monitor is set using the model package. + /// Represents the drift check bias baselines that can be used when the model + /// monitor is set using the model package. #[serde(default, skip_serializing_if = "Option::is_none")] pub bias: Option, - /// Represents the drift check explainability baselines that can be used when the model monitor is set using the model package. + /// Represents the drift check explainability baselines that can be used when + /// the model monitor is set using the model package. #[serde(default, skip_serializing_if = "Option::is_none")] pub explainability: Option, - /// Represents the drift check data quality baselines that can be used when the model monitor is set using the model package. + /// Represents the drift check data quality baselines that can be used when the + /// model monitor is set using the model package. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelDataQuality")] pub model_data_quality: Option, - /// Represents the drift check model quality baselines that can be used when the model monitor is set using the model package. + /// Represents the drift check model quality baselines that can be used when + /// the model monitor is set using the model package. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelQuality")] pub model_quality: Option, } -/// Represents the drift check bias baselines that can be used when the model monitor is set using the model package. +/// Represents the drift check bias baselines that can be used when the model +/// monitor is set using the model package. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageDriftCheckBaselinesBias { /// Contains details regarding the file source. @@ -198,7 +258,8 @@ pub struct ModelPackageDriftCheckBaselinesBiasPreTrainingConstraints { pub s3_uri: Option, } -/// Represents the drift check explainability baselines that can be used when the model monitor is set using the model package. +/// Represents the drift check explainability baselines that can be used when +/// the model monitor is set using the model package. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageDriftCheckBaselinesExplainability { /// Contains details regarding the file source. @@ -231,7 +292,8 @@ pub struct ModelPackageDriftCheckBaselinesExplainabilityConstraints { pub s3_uri: Option, } -/// Represents the drift check data quality baselines that can be used when the model monitor is set using the model package. +/// Represents the drift check data quality baselines that can be used when the +/// model monitor is set using the model package. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageDriftCheckBaselinesModelDataQuality { /// Details about the metrics source. 
@@ -264,7 +326,8 @@ pub struct ModelPackageDriftCheckBaselinesModelDataQualityStatistics { pub s3_uri: Option, } -/// Represents the drift check model quality baselines that can be used when the model monitor is set using the model package. +/// Represents the drift check model quality baselines that can be used when +/// the model monitor is set using the model package. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageDriftCheckBaselinesModelQuality { /// Details about the metrics source. @@ -297,10 +360,20 @@ pub struct ModelPackageDriftCheckBaselinesModelQualityStatistics { pub s3_uri: Option, } -/// Specifies details about inference jobs that can be run with models based on this model package, including the following: -/// * The Amazon ECR paths of containers that contain the inference code and model artifacts. -/// * The instance types that the model package supports for transform jobs and real-time endpoints used for inference. -/// * The input and output content formats that the model package supports for inference. +/// Specifies details about inference jobs that can be run with models based +/// on this model package, including the following: +/// +/// +/// * The Amazon ECR paths of containers that contain the inference code and +/// model artifacts. +/// +/// +/// * The instance types that the model package supports for transform jobs +/// and real-time endpoints used for inference. +/// +/// +/// * The input and output content formats that the model package supports +/// for inference. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageInferenceSpecification { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -517,7 +590,9 @@ pub struct ModelPackageSourceAlgorithmSpecification { pub source_algorithms: Option>, } -/// Specifies an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to. +/// Specifies an algorithm that was used to create the model package. The algorithm +/// must be either an algorithm resource in your SageMaker account or an algorithm +/// in Amazon Web Services Marketplace that you are subscribed to. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageSourceAlgorithmSpecificationSourceAlgorithms { #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmName")] @@ -526,9 +601,21 @@ pub struct ModelPackageSourceAlgorithmSpecificationSourceAlgorithms { pub model_data_url: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
+/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -537,7 +624,8 @@ pub struct ModelPackageTags { pub value: Option, } -/// Specifies configurations for one or more transform jobs that SageMaker runs to test the model package. +/// Specifies configurations for one or more transform jobs that SageMaker runs +/// to test the model package. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageValidationSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "validationProfiles")] @@ -546,18 +634,24 @@ pub struct ModelPackageValidationSpecification { pub validation_role: Option, } -/// Contains data, such as the inputs and targeted instance types that are used in the process of validating the model package. -/// The data provided in the validation profile is made available to your buyers on Amazon Web Services Marketplace. +/// Contains data, such as the inputs and targeted instance types that are used +/// in the process of validating the model package. +/// +/// +/// The data provided in the validation profile is made available to your buyers +/// on Amazon Web Services Marketplace. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageValidationSpecificationValidationProfiles { #[serde(default, skip_serializing_if = "Option::is_none", rename = "profileName")] pub profile_name: Option, - /// Defines the input needed to run a transform job using the inference specification specified in the algorithm. + /// Defines the input needed to run a transform job using the inference specification + /// specified in the algorithm. #[serde(default, skip_serializing_if = "Option::is_none", rename = "transformJobDefinition")] pub transform_job_definition: Option, } -/// Defines the input needed to run a transform job using the inference specification specified in the algorithm. +/// Defines the input needed to run a transform job using the inference specification +/// specified in the algorithm. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefinition { #[serde(default, skip_serializing_if = "Option::is_none", rename = "batchStrategy")] @@ -568,18 +662,21 @@ pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefi pub max_concurrent_transforms: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxPayloadInMB")] pub max_payload_in_mb: Option, - /// Describes the input source of a transform job and the way the transform job consumes it. + /// Describes the input source of a transform job and the way the transform job + /// consumes it. #[serde(default, skip_serializing_if = "Option::is_none", rename = "transformInput")] pub transform_input: Option, /// Describes the results of a transform job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "transformOutput")] pub transform_output: Option, - /// Describes the resources, including ML instance types and ML instance count, to use for transform job. + /// Describes the resources, including ML instance types and ML instance count, + /// to use for transform job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "transformResources")] pub transform_resources: Option, } -/// Describes the input source of a transform job and the way the transform job consumes it. +/// Describes the input source of a transform job and the way the transform job +/// consumes it. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefinitionTransformInput { #[serde(default, skip_serializing_if = "Option::is_none", rename = "compressionType")] @@ -623,7 +720,8 @@ pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefi pub s3_output_path: Option, } -/// Describes the resources, including ML instance types and ML instance count, to use for transform job. +/// Describes the resources, including ML instance types and ML instance count, +/// to use for transform job. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefinitionTransformResources { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -637,10 +735,15 @@ pub struct ModelPackageValidationSpecificationValidationProfilesTransformJobDefi /// ModelPackageStatus defines the observed state of ModelPackage #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A timestamp specifying when the model package was created. @@ -657,20 +760,32 @@ pub struct ModelPackageStatus { pub model_package_status_details: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. 
+ /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPackageStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelqualityjobdefinitions.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelqualityjobdefinitions.rs index 498539575..e239c0af4 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelqualityjobdefinitions.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/modelqualityjobdefinitions.rs @@ -34,13 +34,16 @@ pub struct ModelQualityJobDefinitionSpec { /// Specifies the network configuration for the monitoring job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + /// to perform tasks on your behalf. #[serde(rename = "roleARN")] pub role_arn: String, /// A time limit for how long the monitoring job is allowed to run before stopping. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -157,12 +160,14 @@ pub struct ModelQualityJobDefinitionModelQualityJobOutputConfig { /// The output object for a monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionModelQualityJobOutputConfigMonitoringOutputs { - /// Information about where and how you want to store the results of a monitoring job. + /// Information about where and how you want to store the results of a monitoring + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } -/// Information about where and how you want to store the results of a monitoring job. +/// Information about where and how you want to store the results of a monitoring +/// job. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionModelQualityJobOutputConfigMonitoringOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -180,12 +185,20 @@ pub struct ModelQualityJobDefinitionNetworkConfig { pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -201,9 +214,21 @@ pub struct ModelQualityJobDefinitionStoppingCondition { pub max_runtime_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -215,28 +240,45 @@ pub struct ModelQualityJobDefinitionTags { /// ModelQualityJobDefinitionStatus defines the observed state of ModelQualityJobDefinition #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelQualityJobDefinitionStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/models.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/models.rs index 8e03526c8..39da31f5a 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/models.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/models.rs @@ -6,8 +6,10 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// ModelSpec defines the desired state of Model. -/// The properties of a model as returned by the Search API. +/// ModelSpec defines the desired state of Model. +/// +/// +/// The properties of a model as returned by the Search API. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "Model", plural = "models")] #[kube(namespaced)] @@ -17,11 +19,18 @@ pub struct ModelSpec { /// Specifies the containers in the inference pipeline. #[serde(default, skip_serializing_if = "Option::is_none")] pub containers: Option>, - /// Isolates the model container. No inbound or outbound network calls can be made to or from the model container. + /// Isolates the model container. No inbound or outbound network calls can be + /// made to or from the model container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. 
For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - /// To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume + /// to access model artifacts and docker image for deployment on ML compute instances + /// or for batch transform jobs. Deploying on ML compute instances is part of + /// model hosting. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + /// + /// + /// To be able to pass this role to SageMaker, the caller of this API must have + /// the iam:PassRole permission. #[serde(rename = "executionRoleARN")] pub execution_role_arn: String, /// Specifies details of how containers in a multi-container endpoint are called. @@ -30,13 +39,23 @@ pub struct ModelSpec { /// The name of the new model. #[serde(rename = "modelName")] pub model_name: String, - /// The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions. + /// The location of the primary docker image containing inference code, associated + /// artifacts, and custom environment map that the inference code uses when the + /// model is deployed for predictions. #[serde(default, skip_serializing_if = "Option::is_none", rename = "primaryContainer")] pub primary_container: Option, - /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. VpcConfig is used in hosting services and in batch transform. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). + /// A VpcConfig object that specifies the VPC that you want your model to connect + /// to. Control access to and from your model container by configuring the VPC. + /// VpcConfig is used in hosting services and in batch transform. For more information, + /// see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private + /// Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } @@ -50,7 +69,8 @@ pub struct ModelContainers { pub environment: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). + /// Specifies whether the model container is in Amazon ECR or a private Docker + /// registry accessible from your Amazon Virtual Private Cloud (VPC). #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageConfig")] pub image_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inferenceSpecificationName")] @@ -66,17 +86,26 @@ pub struct ModelContainers { pub multi_model_config: Option, } -/// Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). +/// Specifies whether the model container is in Amazon ECR or a private Docker +/// registry accessible from your Amazon Virtual Private Cloud (VPC). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelContainersImageConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryAccessMode")] pub repository_access_mode: Option, - /// Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication. + /// Specifies an authentication configuration for the private docker registry + /// where your model image is hosted. Specify a value for this property only + /// if you specified Vpc as the value for the RepositoryAccessMode field of the + /// ImageConfig object that you passed to a call to CreateModel and the private + /// Docker registry where the model image is hosted requires authentication. #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryAuthConfig")] pub repository_auth_config: Option, } -/// Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication. +/// Specifies an authentication configuration for the private docker registry +/// where your model image is hosted. Specify a value for this property only +/// if you specified Vpc as the value for the RepositoryAccessMode field of the +/// ImageConfig object that you passed to a call to CreateModel and the private +/// Docker registry where the model image is hosted requires authentication. 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelContainersImageConfigRepositoryAuthConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryCredentialsProviderARN")] @@ -97,7 +126,9 @@ pub struct ModelInferenceExecutionConfig { pub mode: Option, } -/// The location of the primary docker image containing inference code, associated artifacts, and custom environment map that the inference code uses when the model is deployed for predictions. +/// The location of the primary docker image containing inference code, associated +/// artifacts, and custom environment map that the inference code uses when the +/// model is deployed for predictions. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPrimaryContainer { #[serde(default, skip_serializing_if = "Option::is_none", rename = "containerHostname")] @@ -106,7 +137,8 @@ pub struct ModelPrimaryContainer { pub environment: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, - /// Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). + /// Specifies whether the model container is in Amazon ECR or a private Docker + /// registry accessible from your Amazon Virtual Private Cloud (VPC). #[serde(default, skip_serializing_if = "Option::is_none", rename = "imageConfig")] pub image_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inferenceSpecificationName")] @@ -122,17 +154,26 @@ pub struct ModelPrimaryContainer { pub multi_model_config: Option, } -/// Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). +/// Specifies whether the model container is in Amazon ECR or a private Docker +/// registry accessible from your Amazon Virtual Private Cloud (VPC). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPrimaryContainerImageConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryAccessMode")] pub repository_access_mode: Option, - /// Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication. + /// Specifies an authentication configuration for the private docker registry + /// where your model image is hosted. Specify a value for this property only + /// if you specified Vpc as the value for the RepositoryAccessMode field of the + /// ImageConfig object that you passed to a call to CreateModel and the private + /// Docker registry where the model image is hosted requires authentication. #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryAuthConfig")] pub repository_auth_config: Option, } -/// Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified Vpc as the value for the RepositoryAccessMode field of the ImageConfig object that you passed to a call to CreateModel and the private Docker registry where the model image is hosted requires authentication. 
+/// Specifies an authentication configuration for the private docker registry +/// where your model image is hosted. Specify a value for this property only +/// if you specified Vpc as the value for the RepositoryAccessMode field of the +/// ImageConfig object that you passed to a call to CreateModel and the private +/// Docker registry where the model image is hosted requires authentication. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelPrimaryContainerImageConfigRepositoryAuthConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "repositoryCredentialsProviderARN")] @@ -146,9 +187,21 @@ pub struct ModelPrimaryContainerMultiModelConfig { pub model_cache_setting: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -157,7 +210,12 @@ pub struct ModelTags { pub value: Option, } -/// A VpcConfig object that specifies the VPC that you want your model to connect to. Control access to and from your model container by configuring the VPC. VpcConfig is used in hosting services and in batch transform. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). +/// A VpcConfig object that specifies the VPC that you want your model to connect +/// to. Control access to and from your model container by configuring the VPC. +/// VpcConfig is used in hosting services and in batch transform. 
For more information, +/// see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private +/// Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -169,28 +227,45 @@ pub struct ModelVpcConfig { /// ModelStatus defines the observed state of Model #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. 
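To make the status shape concrete, here is a small read-side sketch, assuming the same `models` module path as above, that the `CustomResource` derive exposes `status: Option<ModelStatus>` on the generated `Model` type, and that a kube `Client` is available; the resource name is a placeholder:

use kube::{Api, Client};
use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::models::Model;

async fn print_model_arn(client: Client, name: &str) -> Result<(), kube::Error> {
    let models: Api<Model> = Api::default_namespaced(client);
    let model = models.get(name).await?;
    // ackResourceMetadata carries the constructed ARN plus the owning account
    // and region once the ACK controller has synced the resource.
    if let Some(meta) = model.status.as_ref().and_then(|s| s.ack_resource_metadata.as_ref()) {
        println!("arn={:?} account={} region={}", meta.arn, meta.owner_account_id, meta.region);
    }
    Ok(())
}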
+ /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ModelStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/monitoringschedules.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/monitoringschedules.rs index 1c24bcaa0..d433e1c45 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/monitoringschedules.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/monitoringschedules.rs @@ -6,26 +6,34 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// MonitoringScheduleSpec defines the desired state of MonitoringSchedule. -/// A schedule for a model monitoring job. For information about model monitor, see Amazon SageMaker Model Monitor (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html). +/// MonitoringScheduleSpec defines the desired state of MonitoringSchedule. +/// +/// +/// A schedule for a model monitoring job. For information about model monitor, +/// see Amazon SageMaker Model Monitor (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html). #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "MonitoringSchedule", plural = "monitoringschedules")] #[kube(namespaced)] #[kube(status = "MonitoringScheduleStatus")] #[kube(schema = "disabled")] pub struct MonitoringScheduleSpec { - /// The configuration object that specifies the monitoring schedule and defines the monitoring job. + /// The configuration object that specifies the monitoring schedule and defines + /// the monitoring job. #[serde(rename = "monitoringScheduleConfig")] pub monitoring_schedule_config: MonitoringScheduleMonitoringScheduleConfig, - /// The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account. + /// The name of the monitoring schedule. The name must be unique within an Amazon + /// Web Services Region within an Amazon Web Services account. #[serde(rename = "monitoringScheduleName")] pub monitoring_schedule_name: String, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// The configuration object that specifies the monitoring schedule and defines the monitoring job. +/// The configuration object that specifies the monitoring schedule and defines +/// the monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfig { /// Defines the monitoring job. @@ -43,7 +51,9 @@ pub struct MonitoringScheduleMonitoringScheduleConfig { /// Defines the monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition { - /// Configuration for monitoring constraints and monitoring statistics. These baseline resources are compared against the results of the current job from the series of jobs scheduled to collect data periodically. + /// Configuration for monitoring constraints and monitoring statistics. These + /// baseline resources are compared against the results of the current job from + /// the series of jobs scheduled to collect data periodically. #[serde(default, skip_serializing_if = "Option::is_none", rename = "baselineConfig")] pub baseline_config: Option, #[serde(default, skip_serializing_if = "Option::is_none")] @@ -59,7 +69,10 @@ pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition { /// Identifies the resources to deploy for a monitoring job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "monitoringResources")] pub monitoring_resources: Option, - /// Networking options for a job, such as network traffic encryption between containers, whether to allow inbound and outbound network calls to and from containers, and the VPC subnets and security groups to use for VPC-enabled jobs. + /// Networking options for a job, such as network traffic encryption between + /// containers, whether to allow inbound and outbound network calls to and from + /// containers, and the VPC subnets and security groups to use for VPC-enabled + /// jobs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "roleARN")] @@ -69,7 +82,9 @@ pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinition { pub stopping_condition: Option, } -/// Configuration for monitoring constraints and monitoring statistics. These baseline resources are compared against the results of the current job from the series of jobs scheduled to collect data periodically. +/// Configuration for monitoring constraints and monitoring statistics. These +/// baseline resources are compared against the results of the current job from +/// the series of jobs scheduled to collect data periodically. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionBaselineConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "baseliningJobName")] @@ -156,12 +171,14 @@ pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMoni /// The output object for a monitoring job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputs { - /// Information about where and how you want to store the results of a monitoring job. 
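As a sketch of how one monitoring output is wired up under these (admittedly long) generated type names, assuming `local_path` is an `Option<String>` and leaving the remaining S3 output fields at their `Default` values; the path is a placeholder:

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::monitoringschedules::{
    MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputs as MonitoringOutput,
    MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3Output as MonitoringS3Output,
};

// One output entry that stores job results written to a local container path;
// the destination-side S3 settings are left at their defaults here.
let output = MonitoringOutput {
    s3_output: Some(MonitoringS3Output {
        local_path: Some("/opt/ml/processing/output".to_string()),
        ..Default::default()
    }),
};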
+ /// Information about where and how you want to store the results of a monitoring + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } -/// Information about where and how you want to store the results of a monitoring job. +/// Information about where and how you want to store the results of a monitoring +/// job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMonitoringOutputConfigMonitoringOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -193,19 +210,30 @@ pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionMoni pub volume_size_in_gb: Option, } -/// Networking options for a job, such as network traffic encryption between containers, whether to allow inbound and outbound network calls to and from containers, and the VPC subnets and security groups to use for VPC-enabled jobs. +/// Networking options for a job, such as network traffic encryption between +/// containers, whether to allow inbound and outbound network calls to and from +/// containers, and the VPC subnets and security groups to use for VPC-enabled +/// jobs. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableInterContainerTrafficEncryption")] pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleMonitoringScheduleConfigMonitoringJobDefinitionNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -228,9 +256,21 @@ pub struct MonitoringScheduleMonitoringScheduleConfigScheduleConfig { pub schedule_expression: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -242,16 +282,22 @@ pub struct MonitoringScheduleTags { /// MonitoringScheduleStatus defines the observed state of MonitoringSchedule #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The time at which the monitoring job was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationTime")] pub creation_time: Option, - /// A string, up to one KB in size, that contains the reason a monitoring job failed, if it failed. + /// A string, up to one KB in size, that contains the reason a monitoring job + /// failed, if it failed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, /// The time at which the monitoring job was last modified. @@ -265,20 +311,32 @@ pub struct MonitoringScheduleStatus { pub monitoring_schedule_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. 
+ /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct MonitoringScheduleStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstancelifecycleconfigs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstancelifecycleconfigs.rs index ec1b6c74d..bc8b6d1db 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstancelifecycleconfigs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstancelifecycleconfigs.rs @@ -15,32 +15,65 @@ pub struct NotebookInstanceLifecycleConfigSpec { /// The name of the lifecycle configuration. #[serde(rename = "notebookInstanceLifecycleConfigName")] pub notebook_instance_lifecycle_config_name: String, - /// A shell script that runs only once, when you create a notebook instance. The shell script must be a base64-encoded string. + /// A shell script that runs only once, when you create a notebook instance. + /// The shell script must be a base64-encoded string. #[serde(default, skip_serializing_if = "Option::is_none", rename = "onCreate")] pub on_create: Option>, - /// A shell script that runs every time you start a notebook instance, including when you create the notebook instance. The shell script must be a base64-encoded string. + /// A shell script that runs every time you start a notebook instance, including + /// when you create the notebook instance. The shell script must be a base64-encoded + /// string. #[serde(default, skip_serializing_if = "Option::is_none", rename = "onStart")] pub on_start: Option>, } -/// Contains the notebook instance lifecycle configuration script. -/// Each lifecycle configuration script has a limit of 16384 characters. -/// The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin. -/// View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. -/// Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started. -/// For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). 
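A minimal spec sketch for the hook described above, assuming the elided generics are `Option<Vec<_>>` of the `OnCreate`/`OnStart` types and `Option<String>` for `content`; the base64 string below encodes a trivial `#!/bin/bash` script and is purely illustrative:

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::notebookinstancelifecycleconfigs::{
    NotebookInstanceLifecycleConfigOnCreate, NotebookInstanceLifecycleConfigSpec,
};

// The hook content must already be base64-encoded; this string encodes
// "#!/bin/bash\necho hello".
let spec = NotebookInstanceLifecycleConfigSpec {
    notebook_instance_lifecycle_config_name: "example-lifecycle-config".to_string(),
    on_create: Some(vec![NotebookInstanceLifecycleConfigOnCreate {
        content: Some("IyEvYmluL2Jhc2gKZWNobyBoZWxsbw==".to_string()),
    }]),
    on_start: None,
};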
+/// Contains the notebook instance lifecycle configuration script. +/// +/// +/// Each lifecycle configuration script has a limit of 16384 characters. +/// +/// +/// The value of the $PATH environment variable that is available to both scripts +/// is /sbin:bin:/usr/sbin:/usr/bin. +/// +/// +/// View CloudWatch Logs for notebook instance lifecycle configurations in log +/// group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. +/// +/// +/// Lifecycle configuration scripts cannot run for longer than 5 minutes. If +/// a script runs for longer than 5 minutes, it fails and the notebook instance +/// is not created or started. +/// +/// +/// For information about notebook instance lifestyle configurations, see Step +/// 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceLifecycleConfigOnCreate { #[serde(default, skip_serializing_if = "Option::is_none")] pub content: Option, } -/// Contains the notebook instance lifecycle configuration script. -/// Each lifecycle configuration script has a limit of 16384 characters. -/// The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin. -/// View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. -/// Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started. -/// For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). +/// Contains the notebook instance lifecycle configuration script. +/// +/// +/// Each lifecycle configuration script has a limit of 16384 characters. +/// +/// +/// The value of the $PATH environment variable that is available to both scripts +/// is /sbin:bin:/usr/sbin:/usr/bin. +/// +/// +/// View CloudWatch Logs for notebook instance lifecycle configurations in log +/// group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook]. +/// +/// +/// Lifecycle configuration scripts cannot run for longer than 5 minutes. If +/// a script runs for longer than 5 minutes, it fails and the notebook instance +/// is not created or started. +/// +/// +/// For information about notebook instance lifestyle configurations, see Step +/// 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). 
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceLifecycleConfigOnStart { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -50,10 +83,15 @@ pub struct NotebookInstanceLifecycleConfigOnStart { /// NotebookInstanceLifecycleConfigStatus defines the observed state of NotebookInstanceLifecycleConfig #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceLifecycleConfigStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A timestamp that tells when the lifecycle configuration was created. @@ -64,20 +102,32 @@ pub struct NotebookInstanceLifecycleConfigStatus { pub last_modified_time: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceLifecycleConfigStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. 
+ /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceLifecycleConfigStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstances.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstances.rs index 6ea6945d9..f995a19b1 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstances.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/notebookinstances.rs @@ -12,26 +12,55 @@ use serde::{Serialize, Deserialize}; #[kube(status = "NotebookInstanceStatus")] #[kube(schema = "disabled")] pub struct NotebookInstanceSpec { - /// A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + /// A list of Elastic Inference (EI) instance types to associate with this notebook + /// instance. Currently, only one instance type can be associated with a notebook + /// instance. For more information, see Using Elastic Inference in Amazon SageMaker + /// (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "acceleratorTypes")] pub accelerator_types: Option>, - /// An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + /// An array of up to three Git repositories to associate with the notebook instance. + /// These can be either the names of Git repositories stored as resources in + /// your account, or the URL of Git repositories in Amazon Web Services CodeCommit + /// (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + /// in any other Git repository. These repositories are cloned at the same level + /// as the default repository of your notebook instance. For more information, + /// see Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalCodeRepositories")] pub additional_code_repositories: Option>, - /// A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + /// A Git repository to associate with the notebook instance as its default code + /// repository. This can be either the name of a Git repository stored as a resource + /// in your account, or the URL of a Git repository in Amazon Web Services CodeCommit + /// (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + /// in any other Git repository. When you open a notebook instance, it opens + /// in the directory that contains this repository. For more information, see + /// Associating Git Repositories with SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultCodeRepository")] pub default_code_repository: Option, - /// Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled this notebook instance is able to access resources only in your VPC, and is not be able to connect to SageMaker training and endpoint services unless you configure a NAT Gateway in your VPC. - /// For more information, see Notebook Instances Are Internet-Enabled by Default (https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access). You can set the value of this parameter to Disabled only if you set a value for the SubnetId parameter. + /// Sets whether SageMaker provides internet access to the notebook instance. + /// If you set this to Disabled this notebook instance is able to access resources + /// only in your VPC, and is not be able to connect to SageMaker training and + /// endpoint services unless you configure a NAT Gateway in your VPC. + /// + /// + /// For more information, see Notebook Instances Are Internet-Enabled by Default + /// (https://docs.aws.amazon.com/sagemaker/latest/dg/appendix-additional-considerations.html#appendix-notebook-and-internet-access). + /// You can set the value of this parameter to Disabled only if you set a value + /// for the SubnetId parameter. #[serde(default, skip_serializing_if = "Option::is_none", rename = "directInternetAccess")] pub direct_internet_access: Option, /// The type of ML compute instance to launch for the notebook instance. #[serde(rename = "instanceType")] pub instance_type: String, - /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) in the Amazon Web Services Key Management Service Developer Guide. 
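Pulling the pieces of this spec together, a sparse sketch of a VPC-only notebook instance, assuming the crate's usual snake_case naming for the parts elided above (notably `notebook_instance_name`) and `Default` for everything not set; all names, IDs, and ARNs are placeholders:

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::notebookinstances::NotebookInstanceSpec;

// Disabling direct internet access requires a subnet, and the security groups
// must belong to the same VPC as that subnet.
let spec = NotebookInstanceSpec {
    notebook_instance_name: "example-notebook".to_string(),
    instance_type: "ml.t3.medium".to_string(),
    role_arn: "arn:aws:iam::111122223333:role/sagemaker-notebook".to_string(),
    direct_internet_access: Some("Disabled".to_string()),
    subnet_id: Some("subnet-0abc1234".to_string()),
    security_group_i_ds: Some(vec!["sg-0abc1234".to_string()]),
    volume_size_in_gb: Some(20),
    ..Default::default()
};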
+ /// The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service + /// key that SageMaker uses to encrypt data on the storage volume attached to + /// your notebook instance. The KMS key you provide must be enabled. For information, + /// see Enabling and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) + /// in the Amazon Web Services Key Management Service Developer Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] pub kms_key_id: Option, - /// The name of a lifecycle configuration to associate with the notebook instance. For information about lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). + /// The name of a lifecycle configuration to associate with the notebook instance. + /// For information about lifestyle configurations, see Step 2.1: (Optional) + /// Customize a Notebook Instance (https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleConfigName")] pub lifecycle_config_name: Option, /// The name of the new notebook instance. @@ -40,31 +69,63 @@ pub struct NotebookInstanceSpec { /// The platform identifier of the notebook instance runtime environment. #[serde(default, skip_serializing_if = "Option::is_none", rename = "platformIdentifier")] pub platform_identifier: Option, - /// When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - /// To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// When you send any requests to Amazon Web Services resources from the notebook + /// instance, SageMaker assumes this role to perform tasks on your behalf. You + /// must grant this role necessary permissions so SageMaker can perform these + /// tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) + /// permissions to assume this role. For more information, see SageMaker Roles + /// (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + /// + /// + /// To be able to pass this role to SageMaker, the caller of this API must have + /// the iam:PassRole permission. #[serde(rename = "roleARN")] pub role_arn: String, - /// Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. - /// Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users. + /// Whether root access is enabled or disabled for users of the notebook instance. + /// The default value is Enabled. + /// + /// + /// Lifecycle configurations need root access to be able to set up a notebook + /// instance. Because of this, lifecycle configurations associated with a notebook + /// instance always run with root access even if you disable root access for + /// users. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "rootAccess")] pub root_access: Option, - /// The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet. + /// The VPC security group IDs, in the form sg-xxxxxxxx. The security groups + /// must be for the same VPC as specified in the subnet. #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] pub security_group_i_ds: Option>, - /// The ID of the subnet in a VPC to which you would like to have a connectivity from your ML compute instance. + /// The ID of the subnet in a VPC to which you would like to have a connectivity + /// from your ML compute instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "subnetID")] pub subnet_id: Option, - /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// The size, in GB, of the ML storage volume to attach to the notebook instance. The default value is 5 GB. + /// The size, in GB, of the ML storage volume to attach to the notebook instance. + /// The default value is 5 GB. #[serde(default, skip_serializing_if = "Option::is_none", rename = "volumeSizeInGB")] pub volume_size_in_gb: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
+/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -76,10 +137,15 @@ pub struct NotebookInstanceTags { /// NotebookInstanceStatus defines the observed state of NotebookInstance #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// If status is Failed, the reason it failed. @@ -88,28 +154,42 @@ pub struct NotebookInstanceStatus { /// The status of the notebook instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "notebookInstanceStatus")] pub notebook_instance_status: Option, - /// The URL that you use to connect to the Jupyter notebook that is running in your notebook instance. + /// The URL that you use to connect to the Jupyter notebook that is running in + /// your notebook instance. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppedByControllerMetadata")] pub stopped_by_controller_metadata: Option, - /// The URL that you use to connect to the Jupyter notebook that is running in your notebook instance. + /// The URL that you use to connect to the Jupyter notebook that is running in + /// your notebook instance. #[serde(default, skip_serializing_if = "Option::is_none")] pub url: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. 
TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct NotebookInstanceStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/processingjobs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/processingjobs.rs index ef0a97b59..cb21d740e 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/processingjobs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/processingjobs.rs @@ -6,8 +6,11 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// ProcessingJobSpec defines the desired state of ProcessingJob. -/// An Amazon SageMaker processing job that is used to analyze data and evaluate models. For more information, see Process Data and Evaluate Models (https://docs.aws.amazon.com/sagemaker/latest/dg/processing-job.html). +/// ProcessingJobSpec defines the desired state of ProcessingJob. +/// +/// +/// An Amazon SageMaker processing job that is used to analyze data and evaluate +/// models. For more information, see Process Data and Evaluate Models (https://docs.aws.amazon.com/sagemaker/latest/dg/processing-job.html). #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "ProcessingJob", plural = "processingjobs")] #[kube(namespaced)] @@ -17,37 +20,53 @@ pub struct ProcessingJobSpec { /// Configures the processing job to run a specified Docker container image. #[serde(rename = "appSpecification")] pub app_specification: ProcessingJobAppSpecification, - /// The environment variables to set in the Docker container. Up to 100 key and values entries in the map are supported. + /// The environment variables to set in the Docker container. Up to 100 key and + /// values entries in the map are supported. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub environment: Option>, - /// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: - /// * CreateProcessingJob - /// * CreateTrainingJob - /// * CreateTransformJob + /// Associates a SageMaker job as a trial component with an experiment and trial. + /// Specified when you call the following APIs: + /// + /// + /// * CreateProcessingJob + /// + /// + /// * CreateTrainingJob + /// + /// + /// * CreateTransformJob #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentConfig")] pub experiment_config: Option, - /// Networking options for a processing job, such as whether to allow inbound and outbound network calls to and from processing containers, and the VPC subnets and security groups to use for VPC-enabled processing jobs. + /// Networking options for a processing job, such as whether to allow inbound + /// and outbound network calls to and from processing containers, and the VPC + /// subnets and security groups to use for VPC-enabled processing jobs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "networkConfig")] pub network_config: Option, /// An array of inputs configuring the data to download into the processing container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "processingInputs")] pub processing_inputs: Option>, - /// The name of the processing job. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account. + /// The name of the processing job. The name must be unique within an Amazon + /// Web Services Region in the Amazon Web Services account. #[serde(rename = "processingJobName")] pub processing_job_name: String, /// Output configuration for the processing job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "processingOutputConfig")] pub processing_output_config: Option, - /// Identifies the resources, ML compute instances, and ML storage volumes to deploy for a processing job. In distributed training, you specify more than one instance. + /// Identifies the resources, ML compute instances, and ML storage volumes to + /// deploy for a processing job. In distributed training, you specify more than + /// one instance. #[serde(rename = "processingResources")] pub processing_resources: ProcessingJobProcessingResources, - /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + /// to perform tasks on your behalf. #[serde(rename = "roleARN")] pub role_arn: String, /// The time limit for how long the processing job is allowed to run. #[serde(default, skip_serializing_if = "Option::is_none", rename = "stoppingCondition")] pub stopping_condition: Option, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + /// in the Amazon Web Services Billing and Cost Management User Guide. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } @@ -63,10 +82,17 @@ pub struct ProcessingJobAppSpecification { pub image_uri: Option, } -/// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: -/// * CreateProcessingJob -/// * CreateTrainingJob -/// * CreateTransformJob +/// Associates a SageMaker job as a trial component with an experiment and trial. +/// Specified when you call the following APIs: +/// +/// +/// * CreateProcessingJob +/// +/// +/// * CreateTrainingJob +/// +/// +/// * CreateTransformJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobExperimentConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentName")] @@ -77,19 +103,29 @@ pub struct ProcessingJobExperimentConfig { pub trial_name: Option, } -/// Networking options for a processing job, such as whether to allow inbound and outbound network calls to and from processing containers, and the VPC subnets and security groups to use for VPC-enabled processing jobs. +/// Networking options for a processing job, such as whether to allow inbound +/// and outbound network calls to and from processing containers, and the VPC +/// subnets and security groups to use for VPC-enabled processing jobs. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobNetworkConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableInterContainerTrafficEncryption")] pub enable_inter_container_traffic_encryption: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, - /// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// Specifies a VPC that your training jobs and hosted models have access to. + /// Control access to and from your training and model containers by configuring + /// the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual + /// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + /// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// Specifies a VPC that your training jobs and hosted models have access to. Control access to and from your training and model containers by configuring the VPC. For more information, see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// Specifies a VPC that your training jobs and hosted models have access to. +/// Control access to and from your training and model containers by configuring +/// the VPC. 
For more information, see Protect Endpoints by Using an Amazon Virtual +/// Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) +/// and Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobNetworkConfigVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -98,22 +134,28 @@ pub struct ProcessingJobNetworkConfigVpcConfig { pub subnets: Option>, } -/// The inputs for a processing job. The processing input must specify exactly one of either S3Input or DatasetDefinition types. +/// The inputs for a processing job. The processing input must specify exactly +/// one of either S3Input or DatasetDefinition types. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingInputs { #[serde(default, skip_serializing_if = "Option::is_none", rename = "appManaged")] pub app_managed: Option, - /// Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition types. + /// Configuration for Dataset Definition inputs. The Dataset Definition input + /// must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition + /// types. #[serde(default, skip_serializing_if = "Option::is_none", rename = "datasetDefinition")] pub dataset_definition: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputName")] pub input_name: Option, - /// Configuration for downloading input data from Amazon S3 into the processing container. + /// Configuration for downloading input data from Amazon S3 into the processing + /// container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Input")] pub s3_input: Option, } -/// Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition types. +/// Configuration for Dataset Definition inputs. The Dataset Definition input +/// must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition +/// types. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingInputsDatasetDefinition { /// Configuration for Athena Dataset Definition input. @@ -186,7 +228,8 @@ pub struct ProcessingJobProcessingInputsDatasetDefinitionRedshiftDatasetDefiniti pub query_string: Option, } -/// Configuration for downloading input data from Amazon S3 into the processing container. +/// Configuration for downloading input data from Amazon S3 into the processing +/// container. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingInputsS3Input { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -212,7 +255,8 @@ pub struct ProcessingJobProcessingOutputConfig { pub outputs: Option>, } -/// Describes the results of a processing job. The processing output must specify exactly one of either S3Output or FeatureStoreOutput types. +/// Describes the results of a processing job. The processing output must specify +/// exactly one of either S3Output or FeatureStoreOutput types. 
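The "exactly one of" constraints above are not enforced by the Rust types (every variant is an `Option`), so a caller has to set just one side. A minimal sketch, using only field names visible in this file and the derived `Default` implementations; the local path is a placeholder, and the remaining S3 fields (URI, data type, input mode) are omitted because they are not shown in this hunk.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::processingjobs::{
    ProcessingJobProcessingInputs, ProcessingJobProcessingInputsS3Input,
};

// One input that picks the S3Input variant and leaves DatasetDefinition unset.
fn example_processing_input() -> ProcessingJobProcessingInputs {
    ProcessingJobProcessingInputs {
        input_name: Some("train".to_owned()),
        s3_input: Some(ProcessingJobProcessingInputsS3Input {
            local_path: Some("/opt/ml/processing/input".to_owned()),
            ..Default::default()
        }),
        // Exactly one of s3Input or datasetDefinition: the other stays None.
        dataset_definition: None,
        ..Default::default()
    }
}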
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingOutputConfigOutputs { #[serde(default, skip_serializing_if = "Option::is_none", rename = "appManaged")] @@ -222,7 +266,8 @@ pub struct ProcessingJobProcessingOutputConfigOutputs { pub feature_store_output: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "outputName")] pub output_name: Option, - /// Configuration for uploading output data to Amazon S3 from the processing container. + /// Configuration for uploading output data to Amazon S3 from the processing + /// container. #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3Output")] pub s3_output: Option, } @@ -234,7 +279,8 @@ pub struct ProcessingJobProcessingOutputConfigOutputsFeatureStoreOutput { pub feature_group_name: Option, } -/// Configuration for uploading output data to Amazon S3 from the processing container. +/// Configuration for uploading output data to Amazon S3 from the processing +/// container. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingOutputConfigOutputsS3Output { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -245,7 +291,9 @@ pub struct ProcessingJobProcessingOutputConfigOutputsS3Output { pub s3_upload_mode: Option, } -/// Identifies the resources, ML compute instances, and ML storage volumes to deploy for a processing job. In distributed training, you specify more than one instance. +/// Identifies the resources, ML compute instances, and ML storage volumes to +/// deploy for a processing job. In distributed training, you specify more than +/// one instance. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobProcessingResources { /// Configuration for the cluster used to run a processing job. @@ -273,9 +321,21 @@ pub struct ProcessingJobStoppingCondition { pub max_runtime_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
+/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -287,13 +347,19 @@ pub struct ProcessingJobTags { /// ProcessingJobStatus defines the observed state of ProcessingJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// A string, up to one KB in size, that contains the reason a processing job failed, if it failed. + /// A string, up to one KB in size, that contains the reason a processing job + /// failed, if it failed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, /// Provides the status of a processing job. @@ -301,20 +367,32 @@ pub struct ProcessingJobStatus { pub processing_job_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ProcessingJobStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/trainingjobs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/trainingjobs.rs index 44b8ced82..ed2c4724a 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/trainingjobs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/trainingjobs.rs @@ -6,95 +6,199 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// TrainingJobSpec defines the desired state of TrainingJob. -/// Contains information about a training job. +/// TrainingJobSpec defines the desired state of TrainingJob. +/// +/// +/// Contains information about a training job. #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "TrainingJob", plural = "trainingjobs")] #[kube(namespaced)] #[kube(status = "TrainingJobStatus")] #[kube(schema = "disabled")] pub struct TrainingJobSpec { - /// The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). + /// The registry path of the Docker image that contains the training algorithm + /// and algorithm-specific metadata, including the input mode. For more information + /// about algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// For information about providing your own algorithms, see Using Your Own Algorithms + /// with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). 
#[serde(rename = "algorithmSpecification")] pub algorithm_specification: TrainingJobAlgorithmSpecification, - /// Contains information about the output location for managed spot training checkpoint data. + /// Contains information about the output location for managed spot training + /// checkpoint data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "checkpointConfig")] pub checkpoint_config: Option, - /// Configuration information for the Amazon SageMaker Debugger hook parameters, metric and tensor collections, and storage paths. To learn more about how to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). + /// Configuration information for the Amazon SageMaker Debugger hook parameters, + /// metric and tensor collections, and storage paths. To learn more about how + /// to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger + /// Configuration API Operations to Create, Update, and Debug Your Training Job + /// (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "debugHookConfig")] pub debug_hook_config: Option, - /// Configuration information for Amazon SageMaker Debugger rules for debugging output tensors. + /// Configuration information for Amazon SageMaker Debugger rules for debugging + /// output tensors. #[serde(default, skip_serializing_if = "Option::is_none", rename = "debugRuleConfigurations")] pub debug_rule_configurations: Option>, - /// To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html). + /// To encrypt all communications between ML compute instances in distributed + /// training, choose True. Encryption provides greater security for distributed + /// training, but training might take longer. How long it takes depends on the + /// amount of communication between compute instances, especially if you use + /// a deep learning algorithm in distributed training. For more information, + /// see Protect Communications Between ML Compute Instances in a Distributed + /// Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableInterContainerTrafficEncryption")] pub enable_inter_container_traffic_encryption: Option, - /// To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. this option is useful when training jobs can be interrupted and when there is flexibility when the training job is run. - /// The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. 
They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed. + /// To train models using managed spot training, choose True. Managed spot training + /// provides a fully managed and scalable infrastructure for training machine + /// learning models. this option is useful when training jobs can be interrupted + /// and when there is flexibility when the training job is run. + /// + /// + /// The complete and intermediate results of jobs are stored in an Amazon S3 + /// bucket, and can be used as a starting point to train models incrementally. + /// Amazon SageMaker provides metrics and logs in CloudWatch. They can be used + /// to see when managed spot training jobs are running, interrupted, resumed, + /// or completed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableManagedSpotTraining")] pub enable_managed_spot_training: Option, - /// Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access. + /// Isolates the training container. No inbound or outbound network calls can + /// be made, except for calls between peers within a training cluster for distributed + /// training. If you enable network isolation for training jobs that are configured + /// to use a VPC, SageMaker downloads and uploads customer data and model artifacts + /// through the specified VPC, but the training container does not have network + /// access. #[serde(default, skip_serializing_if = "Option::is_none", rename = "enableNetworkIsolation")] pub enable_network_isolation: Option, /// The environment variables to set in the Docker container. #[serde(default, skip_serializing_if = "Option::is_none")] pub environment: Option>, - /// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: - /// * CreateProcessingJob - /// * CreateTrainingJob - /// * CreateTransformJob + /// Associates a SageMaker job as a trial component with an experiment and trial. + /// Specified when you call the following APIs: + /// + /// + /// * CreateProcessingJob + /// + /// + /// * CreateTrainingJob + /// + /// + /// * CreateTransformJob #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentConfig")] pub experiment_config: Option, - /// Algorithm-specific parameters that influence the quality of the model. You set hyperparameters before you start the learning process. For a list of hyperparameters for each training algorithm provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// You can specify a maximum of 100 hyperparameters. Each hyperparameter is a key-value pair. Each key and value is limited to 256 characters, as specified by the Length Constraint. - /// Do not include any security-sensitive information including account access IDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive credentials are detected, SageMaker will reject your training job request and return an exception error. + /// Algorithm-specific parameters that influence the quality of the model. You + /// set hyperparameters before you start the learning process. 
For a list of + /// hyperparameters for each training algorithm provided by SageMaker, see Algorithms + /// (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// You can specify a maximum of 100 hyperparameters. Each hyperparameter is + /// a key-value pair. Each key and value is limited to 256 characters, as specified + /// by the Length Constraint. + /// + /// + /// Do not include any security-sensitive information including account access + /// IDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive + /// credentials are detected, SageMaker will reject your training job request + /// and return an exception error. #[serde(default, skip_serializing_if = "Option::is_none", rename = "hyperParameters")] pub hyper_parameters: Option>, - /// An array of Channel objects. Each channel is a named input source. InputDataConfig describes the input data and its location. - /// Algorithms can accept input data from one or more channels. For example, an algorithm might have two channels of input data, training_data and validation_data. The configuration for each channel provides the S3, EFS, or FSx location where the input data is stored. It also provides information about the stored data: the MIME type, compression method, and whether the data is wrapped in RecordIO format. - /// Depending on the input mode that the algorithm supports, SageMaker either copies input data files from an S3 bucket to a local directory in the Docker container, or makes it available as input streams. For example, if you specify an EFS location, input data files are available as input streams. They do not need to be downloaded. + /// An array of Channel objects. Each channel is a named input source. InputDataConfig + /// describes the input data and its location. + /// + /// + /// Algorithms can accept input data from one or more channels. For example, + /// an algorithm might have two channels of input data, training_data and validation_data. + /// The configuration for each channel provides the S3, EFS, or FSx location + /// where the input data is stored. It also provides information about the stored + /// data: the MIME type, compression method, and whether the data is wrapped + /// in RecordIO format. + /// + /// + /// Depending on the input mode that the algorithm supports, SageMaker either + /// copies input data files from an S3 bucket to a local directory in the Docker + /// container, or makes it available as input streams. For example, if you specify + /// an EFS location, input data files are available as input streams. They do + /// not need to be downloaded. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputDataConfig")] pub input_data_config: Option>, - /// Specifies the path to the S3 location where you want to store model artifacts. SageMaker creates subfolders for the artifacts. + /// Specifies the path to the S3 location where you want to store model artifacts. + /// SageMaker creates subfolders for the artifacts. #[serde(rename = "outputDataConfig")] pub output_data_config: TrainingJobOutputDataConfig, - /// Configuration information for Amazon SageMaker Debugger system monitoring, framework profiling, and storage paths. + /// Configuration information for Amazon SageMaker Debugger system monitoring, + /// framework profiling, and storage paths. 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "profilerConfig")] pub profiler_config: Option, - /// Configuration information for Amazon SageMaker Debugger rules for profiling system and framework metrics. + /// Configuration information for Amazon SageMaker Debugger rules for profiling + /// system and framework metrics. #[serde(default, skip_serializing_if = "Option::is_none", rename = "profilerRuleConfigurations")] pub profiler_rule_configurations: Option>, - /// The resources, including the ML compute instances and ML storage volumes, to use for model training. - /// ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1. + /// The resources, including the ML compute instances and ML storage volumes, + /// to use for model training. + /// + /// + /// ML storage volumes store model artifacts and incremental states. Training + /// algorithms might also use ML storage volumes for scratch space. If you want + /// SageMaker to use the ML storage volume to store the training data, choose + /// File as the TrainingInputMode in the algorithm specification. For distributed + /// training algorithms, specify an instance count greater than 1. #[serde(rename = "resourceConfig")] pub resource_config: TrainingJobResourceConfig, /// The number of times to retry the job when the job fails due to an InternalServerError. #[serde(default, skip_serializing_if = "Option::is_none", rename = "retryStrategy")] pub retry_strategy: Option, - /// The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf. - /// During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). - /// To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole permission. + /// The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to + /// perform tasks on your behalf. + /// + /// + /// During model training, SageMaker needs your permission to read input data + /// from an S3 bucket, download a Docker image that contains training code, write + /// model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and + /// publish metrics to Amazon CloudWatch. You grant permissions for all of these + /// tasks to an IAM role. For more information, see SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + /// + /// + /// To be able to pass this role to SageMaker, the caller of this API must have + /// the iam:PassRole permission. #[serde(rename = "roleARN")] pub role_arn: String, - /// Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. 
- /// To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. + /// Specifies a limit to how long a model training job can run. It also specifies + /// how long a managed Spot training job has to complete. When the job reaches + /// the time limit, SageMaker ends the training job. Use this API to cap model + /// training costs. + /// + /// + /// To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays + /// job termination for 120 seconds. Algorithms can use this 120-second window + /// to save the model artifacts, so the results of training are not lost. #[serde(rename = "stoppingCondition")] pub stopping_condition: TrainingJobStoppingCondition, - /// An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + /// An array of key-value pairs. You can use tags to categorize your Amazon Web + /// Services resources in different ways, for example, by purpose, owner, or + /// environment. For more information, see Tagging Amazon Web Services Resources + /// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, - /// Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard output data. + /// Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard + /// output data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tensorBoardOutputConfig")] pub tensor_board_output_config: Option, - /// The name of the training job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. + /// The name of the training job. The name must be unique within an Amazon Web + /// Services Region in an Amazon Web Services account. #[serde(rename = "trainingJobName")] pub training_job_name: String, - /// A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). + /// A VpcConfig object that specifies the VPC that you want your training job + /// to connect to. Control access to and from your training container by configuring + /// the VPC. For more information, see Protect Training Jobs by Using an Amazon + /// Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcConfig")] pub vpc_config: Option, } -/// The registry path of the Docker image that contains the training algorithm and algorithm-specific metadata, including the input mode. For more information about algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information about providing your own algorithms, see Using Your Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). 
+/// The registry path of the Docker image that contains the training algorithm +/// and algorithm-specific metadata, including the input mode. For more information +/// about algorithms provided by SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). +/// For information about providing your own algorithms, see Using Your Own Algorithms +/// with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobAlgorithmSpecification { #[serde(default, skip_serializing_if = "Option::is_none", rename = "algorithmName")] @@ -105,21 +209,59 @@ pub struct TrainingJobAlgorithmSpecification { pub metric_definitions: Option>, #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingImage")] pub training_image: Option, - /// The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. - /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. + /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. 
+ /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. + /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingInputMode")] pub training_input_mode: Option, } -/// Specifies a metric that the training algorithm writes to stderr or stdout. SageMakerhyperparameter tuning captures all defined metrics. You specify one metric that a hyperparameter tuning job uses as its objective metric to choose the best training job. +/// Specifies a metric that the training algorithm writes to stderr or stdout. +/// SageMakerhyperparameter tuning captures all defined metrics. You specify +/// one metric that a hyperparameter tuning job uses as its objective metric +/// to choose the best training job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobAlgorithmSpecificationMetricDefinitions { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -128,7 +270,8 @@ pub struct TrainingJobAlgorithmSpecificationMetricDefinitions { pub regex: Option, } -/// Contains information about the output location for managed spot training checkpoint data. +/// Contains information about the output location for managed spot training +/// checkpoint data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobCheckpointConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -137,7 +280,11 @@ pub struct TrainingJobCheckpointConfig { pub s3_uri: Option, } -/// Configuration information for the Amazon SageMaker Debugger hook parameters, metric and tensor collections, and storage paths. To learn more about how to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). +/// Configuration information for the Amazon SageMaker Debugger hook parameters, +/// metric and tensor collections, and storage paths. To learn more about how +/// to configure the DebugHookConfig parameter, see Use the SageMaker and Debugger +/// Configuration API Operations to Create, Update, and Debug Your Training Job +/// (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). 
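The input modes described above (Pipe, File, FastFile) are plain strings on the generated struct. Below is a hedged sketch of an algorithm specification using File mode with one metric definition; the image URI is a placeholder, the `Vec` container for metric definitions is inferred because the generic parameters are elided in this hunk, and only fields visible in this file are populated.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::trainingjobs::{
    TrainingJobAlgorithmSpecification, TrainingJobAlgorithmSpecificationMetricDefinitions,
};

fn example_algorithm_specification() -> TrainingJobAlgorithmSpecification {
    TrainingJobAlgorithmSpecification {
        training_image: Some("123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest".to_owned()),
        // One of the documented modes: Pipe, File, or FastFile.
        training_input_mode: Some("File".to_owned()),
        metric_definitions: Some(vec![TrainingJobAlgorithmSpecificationMetricDefinitions {
            // Regex that the training algorithm's stdout/stderr is matched against.
            regex: Some(r"loss: ([0-9\.]+)".to_owned()),
            ..Default::default()
        }]),
        ..Default::default()
    }
}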
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobDebugHookConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "collectionConfigurations")] @@ -150,7 +297,8 @@ pub struct TrainingJobDebugHookConfig { pub s3_output_path: Option, } -/// Configuration information for the Amazon SageMaker Debugger output tensor collections. +/// Configuration information for the Amazon SageMaker Debugger output tensor +/// collections. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobDebugHookConfigCollectionConfigurations { #[serde(default, skip_serializing_if = "Option::is_none", rename = "collectionName")] @@ -159,7 +307,10 @@ pub struct TrainingJobDebugHookConfigCollectionConfigurations { pub collection_parameters: Option>, } -/// Configuration information for SageMaker Debugger rules for debugging. To learn more about how to configure the DebugRuleConfiguration parameter, see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). +/// Configuration information for SageMaker Debugger rules for debugging. To +/// learn more about how to configure the DebugRuleConfiguration parameter, see +/// Use the SageMaker and Debugger Configuration API Operations to Create, Update, +/// and Debug Your Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-createtrainingjob-api.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobDebugRuleConfigurations { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -178,10 +329,17 @@ pub struct TrainingJobDebugRuleConfigurations { pub volume_size_in_gb: Option, } -/// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: -/// * CreateProcessingJob -/// * CreateTrainingJob -/// * CreateTransformJob +/// Associates a SageMaker job as a trial component with an experiment and trial. +/// Specified when you call the following APIs: +/// +/// +/// * CreateProcessingJob +/// +/// +/// * CreateTrainingJob +/// +/// +/// * CreateTransformJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobExperimentConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentName")] @@ -204,22 +362,70 @@ pub struct TrainingJobInputDataConfig { /// Describes the location of the channel data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataSource")] pub data_source: Option, - /// The training input mode that the algorithm supports. For more information about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). - /// Pipe mode - /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly from Amazon S3 to the container. - /// File mode - /// If an algorithm supports File mode, SageMaker downloads the training data from S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume for the training container. - /// You must provision the ML storage volume with sufficient capacity to accommodate the data downloaded from S3. In addition to the training data, the ML storage volume also stores the output model. The algorithm container uses the ML storage volume to also store intermediate information, if any. 
- /// For distributed algorithms, training data is distributed uniformly. Your training duration is predictable if the input data objects sizes are approximately the same. SageMaker does not split the files any further for model training. If the object sizes are skewed, training won't be optimal as the data distribution is also skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in training. - /// FastFile mode - /// If an algorithm supports FastFile mode, SageMaker streams data directly from S3 to the container with no code changes, and provides file system access to the data. Users can author their training script to interact with these files as if they were stored on disk. - /// FastFile mode works best when the data is read sequentially. Augmented manifest files aren't supported. The startup time is lower when there are fewer files in the S3 bucket provided. + /// The training input mode that the algorithm supports. For more information + /// about input modes, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + /// + /// + /// Pipe mode + /// + /// + /// If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + /// from Amazon S3 to the container. + /// + /// + /// File mode + /// + /// + /// If an algorithm supports File mode, SageMaker downloads the training data + /// from S3 to the provisioned ML storage volume, and mounts the directory to + /// the Docker volume for the training container. + /// + /// + /// You must provision the ML storage volume with sufficient capacity to accommodate + /// the data downloaded from S3. In addition to the training data, the ML storage + /// volume also stores the output model. The algorithm container uses the ML + /// storage volume to also store intermediate information, if any. + /// + /// + /// For distributed algorithms, training data is distributed uniformly. Your + /// training duration is predictable if the input data objects sizes are approximately + /// the same. SageMaker does not split the files any further for model training. + /// If the object sizes are skewed, training won't be optimal as the data distribution + /// is also skewed when one host in a training cluster is overloaded, thus becoming + /// a bottleneck in training. + /// + /// + /// FastFile mode + /// + /// + /// If an algorithm supports FastFile mode, SageMaker streams data directly from + /// S3 to the container with no code changes, and provides file system access + /// to the data. Users can author their training script to interact with these + /// files as if they were stored on disk. + /// + /// + /// FastFile mode works best when the data is read sequentially. Augmented manifest + /// files aren't supported. The startup time is lower when there are fewer files + /// in the S3 bucket provided. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputMode")] pub input_mode: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "recordWrapperType")] pub record_wrapper_type: Option, - /// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. 
- /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. + /// A configuration for a shuffle option for input data in a channel. If you + /// use S3Prefix for S3DataType, the results of the S3 key prefix matches are + /// shuffled. If you use ManifestFile, the order of the S3 object references + /// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order + /// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling + /// order is determined using the Seed value. + /// + /// + /// For Pipe input mode, when ShuffleConfig is specified shuffling is done at + /// the start of every epoch. With large datasets, this ensures that the order + /// of the training data is different for each epoch, and it helps reduce bias + /// and possible overfitting. In a multi-node training job when ShuffleConfig + /// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled + /// across nodes so that the content sent to a particular node on the first epoch + /// might be sent to a different node on the second epoch. #[serde(default, skip_serializing_if = "Option::is_none", rename = "shuffleConfig")] pub shuffle_config: Option, } @@ -263,15 +469,29 @@ pub struct TrainingJobInputDataConfigDataSourceS3DataSource { pub s3_uri: Option, } -/// A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value. -/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch. +/// A configuration for a shuffle option for input data in a channel. If you +/// use S3Prefix for S3DataType, the results of the S3 key prefix matches are +/// shuffled. If you use ManifestFile, the order of the S3 object references +/// in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order +/// of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling +/// order is determined using the Seed value. +/// +/// +/// For Pipe input mode, when ShuffleConfig is specified shuffling is done at +/// the start of every epoch. With large datasets, this ensures that the order +/// of the training data is different for each epoch, and it helps reduce bias +/// and possible overfitting. 
In a multi-node training job when ShuffleConfig +/// is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled +/// across nodes so that the content sent to a particular node on the first epoch +/// might be sent to a different node on the second epoch. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobInputDataConfigShuffleConfig { #[serde(default, skip_serializing_if = "Option::is_none")] pub seed: Option, } -/// Specifies the path to the S3 location where you want to store model artifacts. SageMaker creates subfolders for the artifacts. +/// Specifies the path to the S3 location where you want to store model artifacts. +/// SageMaker creates subfolders for the artifacts. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobOutputDataConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "kmsKeyID")] @@ -280,7 +500,8 @@ pub struct TrainingJobOutputDataConfig { pub s3_output_path: Option, } -/// Configuration information for Amazon SageMaker Debugger system monitoring, framework profiling, and storage paths. +/// Configuration information for Amazon SageMaker Debugger system monitoring, +/// framework profiling, and storage paths. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobProfilerConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "profilingIntervalInMilliseconds")] @@ -310,8 +531,15 @@ pub struct TrainingJobProfilerRuleConfigurations { pub volume_size_in_gb: Option, } -/// The resources, including the ML compute instances and ML storage volumes, to use for model training. -/// ML storage volumes store model artifacts and incremental states. Training algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use the ML storage volume to store the training data, choose File as the TrainingInputMode in the algorithm specification. For distributed training algorithms, specify an instance count greater than 1. +/// The resources, including the ML compute instances and ML storage volumes, +/// to use for model training. +/// +/// +/// ML storage volumes store model artifacts and incremental states. Training +/// algorithms might also use ML storage volumes for scratch space. If you want +/// SageMaker to use the ML storage volume to store the training data, choose +/// File as the TrainingInputMode in the algorithm specification. For distributed +/// training algorithms, specify an instance count greater than 1. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobResourceConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -328,7 +556,9 @@ pub struct TrainingJobResourceConfig { pub volume_size_in_gb: Option, } -/// Defines an instance group for heterogeneous cluster training. When requesting a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) API, you can configure multiple instance groups . +/// Defines an instance group for heterogeneous cluster training. When requesting +/// a training job using the CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTrainingJob.html) +/// API, you can configure multiple instance groups . 
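As the comment above notes, distributed training is expressed simply by an instance count greater than one. A sketch of the resource configuration, setting only the fields visible in this file (numeric types are inferred since the generic parameters are elided in this hunk); instance type and instance groups are left at their defaults.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::trainingjobs::TrainingJobResourceConfig;

fn example_resource_config() -> TrainingJobResourceConfig {
    TrainingJobResourceConfig {
        instance_count: Some(2),     // > 1 requests distributed training
        volume_size_in_gb: Some(50), // ML storage volume for data and artifacts
        ..Default::default()
    }
}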
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobResourceConfigInstanceGroups { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -346,8 +576,15 @@ pub struct TrainingJobRetryStrategy { pub maximum_retry_attempts: Option, } -/// Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs. -/// To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost. +/// Specifies a limit to how long a model training job can run. It also specifies +/// how long a managed Spot training job has to complete. When the job reaches +/// the time limit, SageMaker ends the training job. Use this API to cap model +/// training costs. +/// +/// +/// To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays +/// job termination for 120 seconds. Algorithms can use this 120-second window +/// to save the model artifacts, so the results of training are not lost. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobStoppingCondition { #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxRuntimeInSeconds")] @@ -356,9 +593,21 @@ pub struct TrainingJobStoppingCondition { pub max_wait_time_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). +/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). 
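Tying the pieces together, here is a sketch of a TrainingJobSpec that sets the required names plus the stopping condition described above (a hard runtime cap and, for managed Spot training, a wait limit that typically at least covers the runtime cap). The role ARN and job name are placeholders, and the remaining required sub-configurations fall back to their `Default` values to keep the sketch self-contained.

use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::trainingjobs::{
    TrainingJobSpec, TrainingJobStoppingCondition,
};

fn example_training_job_spec() -> TrainingJobSpec {
    TrainingJobSpec {
        training_job_name: "example-training-job".to_owned(),
        role_arn: "arn:aws:iam::111122223333:role/example-sagemaker-role".to_owned(),
        stopping_condition: TrainingJobStoppingCondition {
            max_runtime_in_seconds: Some(3600),
            // Only meaningful for managed Spot training.
            max_wait_time_in_seconds: Some(7200),
            ..Default::default()
        },
        // algorithmSpecification, outputDataConfig, resourceConfig and the other
        // required blocks are defaulted here purely for illustration.
        ..Default::default()
    }
}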
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -367,7 +616,8 @@ pub struct TrainingJobTags { pub value: Option, } -/// Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard output data. +/// Configuration of storage locations for the Amazon SageMaker Debugger TensorBoard +/// output data. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobTensorBoardOutputConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "localPath")] @@ -376,7 +626,10 @@ pub struct TrainingJobTensorBoardOutputConfig { pub s3_output_path: Option, } -/// A VpcConfig object that specifies the VPC that you want your training job to connect to. Control access to and from your training container by configuring the VPC. For more information, see Protect Training Jobs by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). +/// A VpcConfig object that specifies the VPC that you want your training job +/// to connect to. Control access to and from your training container by configuring +/// the VPC. For more information, see Protect Training Jobs by Using an Amazon +/// Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobVpcConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroupIDs")] @@ -388,16 +641,22 @@ pub struct TrainingJobVpcConfig { /// TrainingJobStatus defines the observed state of TrainingJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// A timestamp that indicates when the training job was created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "creationTime")] pub creation_time: Option, - /// Evaluation status of Amazon SageMaker Debugger rules for debugging on a training job. + /// Evaluation status of Amazon SageMaker Debugger rules for debugging on a training + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "debugRuleEvaluationStatuses")] pub debug_rule_evaluation_statuses: Option>, /// If the training job failed, the reason it failed. @@ -406,48 +665,120 @@ pub struct TrainingJobStatus { /// A timestamp that indicates when the status of the training job was last modified. 
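On the status side, a sketch (under the same module-path assumption as the earlier snippets, and assuming the TrainingJob status structs mirror the ProcessingJob ones shown above) of reading the observed state back and inspecting the common ACK members; the namespace and object name are placeholders.

use kube::{Api, Client};
use kube_custom_resources_rs::sagemaker_services_k8s_aws::v1alpha1::trainingjobs::TrainingJob;

async fn print_training_job_status() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::try_default().await?;
    let jobs: Api<TrainingJob> = Api::namespaced(client, "default");
    let job = jobs.get("example-training-job").await?;
    if let Some(status) = job.status {
        if let Some(meta) = status.ack_resource_metadata {
            println!("ARN: {:?}", meta.arn);
        }
        for condition in status.conditions.unwrap_or_default() {
            println!("condition: {condition:?}");
        }
        println!("trainingJobStatus: {:?}", status.training_job_status);
    }
    Ok(())
}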
#[serde(default, skip_serializing_if = "Option::is_none", rename = "lastModifiedTime")] pub last_modified_time: Option, - /// Information about the Amazon S3 location that is configured for storing model artifacts. + /// Information about the Amazon S3 location that is configured for storing model + /// artifacts. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelArtifacts")] pub model_artifacts: Option, - /// Evaluation status of Amazon SageMaker Debugger rules for profiling on a training job. + /// Evaluation status of Amazon SageMaker Debugger rules for profiling on a training + /// job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "profilerRuleEvaluationStatuses")] pub profiler_rule_evaluation_statuses: Option>, /// Profiling status of a training job. #[serde(default, skip_serializing_if = "Option::is_none", rename = "profilingStatus")] pub profiling_status: Option, - /// Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition. - /// SageMaker provides primary statuses and secondary statuses that apply to each of them: - /// InProgress - /// * Starting - Starting the training job. - /// * Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes. - /// * Training - Training is in progress. - /// * Interrupted - The job stopped because the managed spot training instances were interrupted. - /// * Uploading - Training is complete and the model artifacts are being uploaded to the S3 location. - /// Completed - /// * Completed - The training job has completed. - /// Failed - /// * Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse. - /// Stopped - /// * MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime. - /// * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time. - /// * Stopped - The training job has stopped. - /// Stopping - /// * Stopping - Stopping the training job. - /// Valid values for SecondaryStatus are subject to change. - /// We no longer support the following secondary statuses: - /// * LaunchingMLInstances - /// * PreparingTraining - /// * DownloadingTrainingImage + /// Provides detailed information about the state of the training job. For detailed + /// information on the secondary status of the training job, see StatusMessage + /// under SecondaryStatusTransition. + /// + /// + /// SageMaker provides primary statuses and secondary statuses that apply to + /// each of them: + /// + /// + /// InProgress + /// + /// + /// * Starting - Starting the training job. + /// + /// + /// * Downloading - An optional stage for algorithms that support File training + /// input mode. It indicates that data is being downloaded to the ML storage + /// volumes. + /// + /// + /// * Training - Training is in progress. + /// + /// + /// * Interrupted - The job stopped because the managed spot training instances + /// were interrupted. + /// + /// + /// * Uploading - Training is complete and the model artifacts are being uploaded + /// to the S3 location. + /// + /// + /// Completed + /// + /// + /// * Completed - The training job has completed. + /// + /// + /// Failed + /// + /// + /// * Failed - The training job has failed. 
The reason for the failure is + /// returned in the FailureReason field of DescribeTrainingJobResponse. + /// + /// + /// Stopped + /// + /// + /// * MaxRuntimeExceeded - The job stopped because it exceeded the maximum + /// allowed runtime. + /// + /// + /// * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum + /// allowed wait time. + /// + /// + /// * Stopped - The training job has stopped. + /// + /// + /// Stopping + /// + /// + /// * Stopping - Stopping the training job. + /// + /// + /// Valid values for SecondaryStatus are subject to change. + /// + /// + /// We no longer support the following secondary statuses: + /// + /// + /// * LaunchingMLInstances + /// + /// + /// * PreparingTraining + /// + /// + /// * DownloadingTrainingImage #[serde(default, skip_serializing_if = "Option::is_none", rename = "secondaryStatus")] pub secondary_status: Option, - /// The status of the training job. - /// SageMaker provides the following training job statuses: - /// * InProgress - The training is in progress. - /// * Completed - The training job has completed. - /// * Failed - The training job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTrainingJobResponse call. - /// * Stopping - The training job is stopping. - /// * Stopped - The training job has stopped. - /// For more detailed information, see SecondaryStatus. + /// The status of the training job. + /// + /// + /// SageMaker provides the following training job statuses: + /// + /// + /// * InProgress - The training is in progress. + /// + /// + /// * Completed - The training job has completed. + /// + /// + /// * Failed - The training job has failed. To see the reason for the failure, + /// see the FailureReason field in the response to a DescribeTrainingJobResponse + /// call. + /// + /// + /// * Stopping - The training job is stopping. + /// + /// + /// * Stopped - The training job has stopped. + /// + /// + /// For more detailed information, see SecondaryStatus. #[serde(default, skip_serializing_if = "Option::is_none", rename = "trainingJobStatus")] pub training_job_status: Option, /// The status of the warm pool associated with the training job. @@ -455,20 +786,32 @@ pub struct TrainingJobStatus { pub warm_pool_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. 
This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobStatusConditions { /// Last time the condition transitioned from one status to another. @@ -502,7 +845,8 @@ pub struct TrainingJobStatusDebugRuleEvaluationStatuses { pub status_details: Option, } -/// Information about the Amazon S3 location that is configured for storing model artifacts. +/// Information about the Amazon S3 location that is configured for storing model +/// artifacts. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TrainingJobStatusModelArtifacts { #[serde(default, skip_serializing_if = "Option::is_none", rename = "s3ModelArtifacts")] diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/transformjobs.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/transformjobs.rs index e65dd5560..b095a3d92 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/transformjobs.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/transformjobs.rs @@ -6,64 +6,125 @@ use kube::CustomResource; use serde::{Serialize, Deserialize}; use std::collections::BTreeMap; -/// TransformJobSpec defines the desired state of TransformJob. -/// A batch transform job. For information about SageMaker batch transform, see Use Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). +/// TransformJobSpec defines the desired state of TransformJob. +/// +/// +/// A batch transform job. For information about SageMaker batch transform, see +/// Use Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). #[derive(CustomResource, Serialize, Deserialize, Clone, Debug, Default, PartialEq)] #[kube(group = "sagemaker.services.k8s.aws", version = "v1alpha1", kind = "TransformJob", plural = "transformjobs")] #[kube(namespaced)] #[kube(status = "TransformJobStatus")] #[kube(schema = "disabled")] pub struct TransformJobSpec { - /// Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record. 
- /// To enable the batch strategy, you must set the SplitType property to Line, RecordIO, or TFRecord. - /// To use only one record when making an HTTP invocation request to a container, set BatchStrategy to SingleRecord and SplitType to Line. - /// To fit as many records in a mini-batch as can fit within the MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType to Line. + /// Specifies the number of records to include in a mini-batch for an HTTP inference + /// request. A record is a single unit of input data that inference can be made + /// on. For example, a single line in a CSV file is a record. + /// + /// + /// To enable the batch strategy, you must set the SplitType property to Line, + /// RecordIO, or TFRecord. + /// + /// + /// To use only one record when making an HTTP invocation request to a container, + /// set BatchStrategy to SingleRecord and SplitType to Line. + /// + /// + /// To fit as many records in a mini-batch as can fit within the MaxPayloadInMB + /// limit, set BatchStrategy to MultiRecord and SplitType to Line. #[serde(default, skip_serializing_if = "Option::is_none", rename = "batchStrategy")] pub batch_strategy: Option, - /// The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + /// The data structure used to specify the data to be used for inference in a + /// batch transform job and to associate the data that is relevant to the prediction + /// results in the output. The input filter provided allows you to exclude input + /// data that is not needed for inference in a batch transform job. The output + /// filter provided allows you to include input data relevant to interpreting + /// the predictions in the output from the job. For more information, see Associate + /// Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "dataProcessing")] pub data_processing: Option, - /// The environment variables to set in the Docker container. We support up to 16 key and values entries in the map. + /// The environment variables to set in the Docker container. We support up to + /// 16 key and values entries in the map. #[serde(default, skip_serializing_if = "Option::is_none")] pub environment: Option>, - /// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: - /// * CreateProcessingJob - /// * CreateTrainingJob - /// * CreateTransformJob + /// Associates a SageMaker job as a trial component with an experiment and trial. 
+ /// Specified when you call the following APIs: + /// + /// + /// * CreateProcessingJob + /// + /// + /// * CreateTrainingJob + /// + /// + /// * CreateTransformJob #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentConfig")] pub experiment_config: Option, - /// The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. + /// The maximum number of parallel requests that can be sent to each instance + /// in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, + /// Amazon SageMaker checks the optional execution-parameters to determine the + /// settings for your chosen algorithm. If the execution-parameters endpoint + /// is not enabled, the default value is 1. For more information on execution-parameters, + /// see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). + /// For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxConcurrentTransforms")] pub max_concurrent_transforms: Option, - /// The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than, or equal to, the size of a single record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The default value is 6 MB. - /// The value of MaxPayloadInMB cannot be greater than 100 MB. If you specify the MaxConcurrentTransforms parameter, the value of (MaxConcurrentTransforms * MaxPayloadInMB) also cannot exceed 100 MB. - /// For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support HTTP chunked encoding. + /// The maximum allowed size of the payload, in MB. A payload is the data portion + /// of a record (without metadata). The value in MaxPayloadInMB must be greater + /// than, or equal to, the size of a single record. To estimate the size of a + /// record in MB, divide the size of your dataset by the number of records. To + /// ensure that the records fit within the maximum payload size, we recommend + /// using a slightly larger value. The default value is 6 MB. + /// + /// + /// The value of MaxPayloadInMB cannot be greater than 100 MB. If you specify + /// the MaxConcurrentTransforms parameter, the value of (MaxConcurrentTransforms + /// * MaxPayloadInMB) also cannot exceed 100 MB. + /// + /// + /// For cases where the payload might be arbitrarily large and is transmitted + /// using HTTP chunked encoding, set the value to 0. This feature works only + /// in supported algorithms. 
Currently, Amazon SageMaker built-in algorithms + /// do not support HTTP chunked encoding. #[serde(default, skip_serializing_if = "Option::is_none", rename = "maxPayloadInMB")] pub max_payload_in_mb: Option, - /// Configures the timeout and maximum number of retries for processing a transform job invocation. + /// Configures the timeout and maximum number of retries for processing a transform + /// job invocation. #[serde(default, skip_serializing_if = "Option::is_none", rename = "modelClientConfig")] pub model_client_config: Option, - /// The name of the model that you want to use for the transform job. ModelName must be the name of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account. + /// The name of the model that you want to use for the transform job. ModelName + /// must be the name of an existing Amazon SageMaker model within an Amazon Web + /// Services Region in an Amazon Web Services account. #[serde(rename = "modelName")] pub model_name: String, - /// (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) in the Amazon Web Services Billing and Cost Management User Guide. + /// (Optional) An array of key-value pairs. For more information, see Using Cost + /// Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + /// in the Amazon Web Services Billing and Cost Management User Guide. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// Describes the input source and the way the transform job consumes it. #[serde(rename = "transformInput")] pub transform_input: TransformJobTransformInput, - /// The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account. + /// The name of the transform job. The name must be unique within an Amazon Web + /// Services Region in an Amazon Web Services account. #[serde(rename = "transformJobName")] pub transform_job_name: String, /// Describes the results of the transform job. #[serde(rename = "transformOutput")] pub transform_output: TransformJobTransformOutput, - /// Describes the resources, including ML instance types and ML instance count, to use for the transform job. + /// Describes the resources, including ML instance types and ML instance count, + /// to use for the transform job. #[serde(rename = "transformResources")] pub transform_resources: TransformJobTransformResources, } -/// The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). +/// The data structure used to specify the data to be used for inference in a +/// batch transform job and to associate the data that is relevant to the prediction +/// results in the output. The input filter provided allows you to exclude input +/// data that is not needed for inference in a batch transform job. 
The output +/// filter provided allows you to include input data relevant to interpreting +/// the predictions in the output from the job. For more information, see Associate +/// Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobDataProcessing { #[serde(default, skip_serializing_if = "Option::is_none", rename = "inputFilter")] @@ -74,10 +135,17 @@ pub struct TransformJobDataProcessing { pub output_filter: Option, } -/// Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the following APIs: -/// * CreateProcessingJob -/// * CreateTrainingJob -/// * CreateTransformJob +/// Associates a SageMaker job as a trial component with an experiment and trial. +/// Specified when you call the following APIs: +/// +/// +/// * CreateProcessingJob +/// +/// +/// * CreateTrainingJob +/// +/// +/// * CreateTransformJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobExperimentConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "experimentName")] @@ -88,7 +156,8 @@ pub struct TransformJobExperimentConfig { pub trial_name: Option, } -/// Configures the timeout and maximum number of retries for processing a transform job invocation. +/// Configures the timeout and maximum number of retries for processing a transform +/// job invocation. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobModelClientConfig { #[serde(default, skip_serializing_if = "Option::is_none", rename = "invocationsMaxRetries")] @@ -97,9 +166,21 @@ pub struct TransformJobModelClientConfig { pub invocations_timeout_in_seconds: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
+/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -152,7 +233,8 @@ pub struct TransformJobTransformOutput { pub s3_output_path: Option, } -/// Describes the resources, including ML instance types and ML instance count, to use for the transform job. +/// Describes the resources, including ML instance types and ML instance count, +/// to use for the transform job. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobTransformResources { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceCount")] @@ -166,34 +248,55 @@ pub struct TransformJobTransformResources { /// TransformJobStatus defines the observed state of TransformJob #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// If the transform job failed, FailureReason describes why it failed. A transform job creates a log file, which includes error messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). + /// If the transform job failed, FailureReason describes why it failed. A transform + /// job creates a log file, which includes error messages, and stores it as an + /// Amazon S3 object. For more information, see Log Amazon SageMaker Events with + /// Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureReason")] pub failure_reason: Option, - /// The status of the transform job. If the transform job failed, the reason is returned in the FailureReason field. + /// The status of the transform job. If the transform job failed, the reason + /// is returned in the FailureReason field. 
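Earlier in this TransformJob spec, the `maxPayloadInMB` documentation notes that the product of MaxConcurrentTransforms and MaxPayloadInMB cannot exceed 100 MB when both are set. A small, hedged helper sketch of that check follows; the function name and parameter types are illustrative, not part of the generated API.

```rust
// Hedged sketch: checks the documented constraint that
// MaxConcurrentTransforms * MaxPayloadInMB must not exceed 100 MB
// when both values are specified.
fn within_payload_budget(max_concurrent_transforms: Option<i64>, max_payload_in_mb: Option<i64>) -> bool {
    match (max_concurrent_transforms, max_payload_in_mb) {
        (Some(c), Some(p)) => c * p <= 100,
        // The constraint only applies when both values are set.
        _ => true,
    }
}

fn main() {
    assert!(within_payload_budget(Some(8), Some(6)));   // 48 MB total: fine
    assert!(!within_payload_budget(Some(32), Some(6))); // 192 MB total: rejected
}
```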
#[serde(default, skip_serializing_if = "Option::is_none", rename = "transformJobStatus")] pub transform_job_status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct TransformJobStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/userprofiles.rs b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/userprofiles.rs index 65c250148..a7dc4f0be 100644 --- a/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/userprofiles.rs +++ b/kube-custom-resources-rs/src/sagemaker_services_k8s_aws/v1alpha1/userprofiles.rs @@ -15,14 +15,25 @@ pub struct UserProfileSpec { /// The ID of the associated Domain. #[serde(rename = "domainID")] pub domain_id: String, - /// A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is "UserName". If the Domain's AuthMode is IAM Identity Center, this field is required. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. 
+ /// A specifier for the type of value specified in SingleSignOnUserValue. Currently, + /// the only supported value is "UserName". If the Domain's AuthMode is IAM Identity + /// Center, this field is required. If the Domain's AuthMode is not IAM Identity + /// Center, this field cannot be specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "singleSignOnUserIdentifier")] pub single_sign_on_user_identifier: Option, - /// The username of the associated Amazon Web Services Single Sign-On User for this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not IAM Identity Center, this field cannot be specified. + /// The username of the associated Amazon Web Services Single Sign-On User for + /// this UserProfile. If the Domain's AuthMode is IAM Identity Center, this field + /// is required, and must match a valid username of a user in your directory. + /// If the Domain's AuthMode is not IAM Identity Center, this field cannot be + /// specified. #[serde(default, skip_serializing_if = "Option::is_none", rename = "singleSignOnUserValue")] pub single_sign_on_user_value: Option, - /// Each tag consists of a key and an optional value. Tag keys must be unique per resource. - /// Tags that you specify for the User Profile are also added to all Apps that the User Profile launches. + /// Each tag consists of a key and an optional value. Tag keys must be unique + /// per resource. + /// + /// + /// Tags that you specify for the User Profile are also added to all Apps that + /// the User Profile launches. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// A name for the UserProfile. This value is not case sensitive. @@ -33,9 +44,21 @@ pub struct UserProfileSpec { pub user_settings: Option, } -/// A tag object that consists of a key and an optional value, used to manage metadata for SageMaker Amazon Web Services resources. -/// You can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see AddTags. -/// For more information on adding metadata to your Amazon Web Services resources with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). For advice on best practices for managing Amazon Web Services resources with tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). +/// A tag object that consists of a key and an optional value, used to manage +/// metadata for SageMaker Amazon Web Services resources. +/// +/// +/// You can add tags to notebook instances, training jobs, hyperparameter tuning +/// jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, +/// and endpoints. For more information on adding tags to SageMaker resources, +/// see AddTags. +/// +/// +/// For more information on adding metadata to your Amazon Web Services resources +/// with tagging, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). 
+/// For advice on best practices for managing Amazon Web Services resources with +/// tagging, see Tagging Best Practices: Implement an Effective Amazon Web Services +/// Resource Tagging Strategy (https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -55,12 +78,17 @@ pub struct UserProfileUserSettings { /// The KernelGateway app settings. #[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelGatewayAppSettings")] pub kernel_gateway_app_settings: Option, - /// A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes. + /// A collection of settings that configure user interaction with the RStudioServerPro + /// app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro + /// app must be deleted and a new one created to make any changes. #[serde(default, skip_serializing_if = "Option::is_none", rename = "rStudioServerProAppSettings")] pub r_studio_server_pro_app_settings: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "securityGroups")] pub security_groups: Option>, - /// Specifies options for sharing SageMaker Studio notebooks. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed. + /// Specifies options for sharing SageMaker Studio notebooks. These settings + /// are specified as part of DefaultUserSettings when the CreateDomain API is + /// called, and as part of UserSettings when the CreateUserProfile API is called. + /// When SharingSettings is not specified, notebook sharing isn't allowed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "sharingSettings")] pub sharing_settings: Option, /// The TensorBoard app settings. @@ -71,14 +99,16 @@ pub struct UserProfileUserSettings { /// The JupyterServer app settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsJupyterServerAppSettings { - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleConfigARNs")] pub lifecycle_config_ar_ns: Option>, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. 
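A little earlier in this UserProfileSpec, the two single-sign-on fields are documented as a pair: both are required when the Domain's AuthMode is IAM Identity Center, both must be omitted otherwise, and the identifier currently only supports the value "UserName". The sketch below encodes that rule as a standalone check; the function name, the boolean parameter, and the `Option<String>` types are assumptions for illustration, since the generated generics are not visible in this excerpt.

```rust
// Hedged sketch of the documented pairing rule for the SSO fields.
fn sso_fields_consistent(
    domain_uses_iam_identity_center: bool,
    single_sign_on_user_identifier: &Option<String>,
    single_sign_on_user_value: &Option<String>,
) -> bool {
    let both_set = single_sign_on_user_identifier.is_some() && single_sign_on_user_value.is_some();
    let both_unset = single_sign_on_user_identifier.is_none() && single_sign_on_user_value.is_none();
    // "UserName" is currently the only supported identifier value.
    let identifier_ok = single_sign_on_user_identifier
        .as_deref()
        .map_or(true, |v| v == "UserName");
    if domain_uses_iam_identity_center {
        both_set && identifier_ok
    } else {
        both_unset
    }
}

fn main() {
    assert!(sso_fields_consistent(true, &Some("UserName".into()), &Some("jane.doe".into())));
    assert!(sso_fields_consistent(false, &None, &None));
    assert!(!sso_fields_consistent(false, &Some("UserName".into()), &Some("jane.doe".into())));
}
```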
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsJupyterServerAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -96,14 +126,16 @@ pub struct UserProfileUserSettingsJupyterServerAppSettingsDefaultResourceSpec { pub struct UserProfileUserSettingsKernelGatewayAppSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "customImages")] pub custom_images: Option>, - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, #[serde(default, skip_serializing_if = "Option::is_none", rename = "lifecycleConfigARNs")] pub lifecycle_config_ar_ns: Option>, } -/// A custom SageMaker image. For more information, see Bring your own SageMaker image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). +/// A custom SageMaker image. For more information, see Bring your own SageMaker +/// image (https://docs.aws.amazon.com/sagemaker/latest/dg/studio-byoi.html). #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsKernelGatewayAppSettingsCustomImages { #[serde(default, skip_serializing_if = "Option::is_none", rename = "appImageConfigName")] @@ -114,7 +146,8 @@ pub struct UserProfileUserSettingsKernelGatewayAppSettingsCustomImages { pub image_version_number: Option, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsKernelGatewayAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -127,7 +160,9 @@ pub struct UserProfileUserSettingsKernelGatewayAppSettingsDefaultResourceSpec { pub sage_maker_image_version_arn: Option, } -/// A collection of settings that configure user interaction with the RStudioServerPro app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro app must be deleted and a new one created to make any changes. +/// A collection of settings that configure user interaction with the RStudioServerPro +/// app. RStudioServerProAppSettings cannot be updated. The RStudioServerPro +/// app must be deleted and a new one created to make any changes. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsRStudioServerProAppSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "accessStatus")] @@ -136,7 +171,10 @@ pub struct UserProfileUserSettingsRStudioServerProAppSettings { pub user_group: Option, } -/// Specifies options for sharing SageMaker Studio notebooks. These settings are specified as part of DefaultUserSettings when the CreateDomain API is called, and as part of UserSettings when the CreateUserProfile API is called. When SharingSettings is not specified, notebook sharing isn't allowed. +/// Specifies options for sharing SageMaker Studio notebooks. 
These settings +/// are specified as part of DefaultUserSettings when the CreateDomain API is +/// called, and as part of UserSettings when the CreateUserProfile API is called. +/// When SharingSettings is not specified, notebook sharing isn't allowed. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsSharingSettings { #[serde(default, skip_serializing_if = "Option::is_none", rename = "notebookOutputOption")] @@ -150,12 +188,14 @@ pub struct UserProfileUserSettingsSharingSettings { /// The TensorBoard app settings. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsTensorBoardAppSettings { - /// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. + /// Specifies the ARN's of a SageMaker image and SageMaker image version, and + /// the instance type that the version runs on. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultResourceSpec")] pub default_resource_spec: Option, } -/// Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that the version runs on. +/// Specifies the ARN's of a SageMaker image and SageMaker image version, and +/// the instance type that the version runs on. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileUserSettingsTensorBoardAppSettingsDefaultResourceSpec { #[serde(default, skip_serializing_if = "Option::is_none", rename = "instanceType")] @@ -171,10 +211,15 @@ pub struct UserProfileUserSettingsTensorBoardAppSettingsDefaultResourceSpec { /// UserProfileStatus defines the observed state of UserProfile #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The status. @@ -182,20 +227,32 @@ pub struct UserProfileStatus { pub status: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. 
This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct UserProfileStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/hcpvaultsecretsapps.rs b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/hcpvaultsecretsapps.rs index b7dd8696b..b5ac6d5ce 100644 --- a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/hcpvaultsecretsapps.rs +++ b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/hcpvaultsecretsapps.rs @@ -56,16 +56,14 @@ pub struct HCPVaultSecretsAppDestination { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct HCPVaultSecretsAppDestinationTransformation { /// ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration. - #[serde(rename = "excludeRaw")] - pub exclude_raw: bool, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeRaw")] + pub exclude_raw: Option, /// Excludes contains regex patterns used to filter top-level source secret data fields for exclusion from the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied before any inclusion patterns. To exclude all source secret data fields, you can configure the single pattern ".*". 
#[serde(default, skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// Includes contains regex patterns used to filter top-level source secret data fields for inclusion in the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied last. #[serde(default, skip_serializing_if = "Option::is_none")] pub includes: Option>, - /// Resync the Secret on updates to any configured TransformationRefs. - pub resync: bool, /// Templates maps a template name to its Template. Templates are always included in the rendered K8s Secret, and take precedence over templates defined in a SecretTransformation. #[serde(default, skip_serializing_if = "Option::is_none")] pub templates: Option>, @@ -134,6 +132,9 @@ pub enum HCPVaultSecretsAppRolloutRestartTargetsKind { /// HCPVaultSecretsAppStatus defines the observed state of HCPVaultSecretsApp #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct HCPVaultSecretsAppStatus { + /// LastGeneration is the Generation of the last reconciled resource. + #[serde(rename = "lastGeneration")] + pub last_generation: i64, /// SecretMAC used when deciding whether new Vault secret data should be synced. /// The controller will compare the "new" HCP Vault Secrets App data to this value using HMAC, if they are different, then the data will be synced to the Destination. /// The SecretMac is also used to detect drift in the Destination Secret's Data. If drift is detected the data will be synced to the Destination. diff --git a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultdynamicsecrets.rs b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultdynamicsecrets.rs index b6c336d85..712c43f03 100644 --- a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultdynamicsecrets.rs +++ b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultdynamicsecrets.rs @@ -72,16 +72,14 @@ pub struct VaultDynamicSecretDestination { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct VaultDynamicSecretDestinationTransformation { /// ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration. - #[serde(rename = "excludeRaw")] - pub exclude_raw: bool, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeRaw")] + pub exclude_raw: Option, /// Excludes contains regex patterns used to filter top-level source secret data fields for exclusion from the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied before any inclusion patterns. To exclude all source secret data fields, you can configure the single pattern ".*". #[serde(default, skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// Includes contains regex patterns used to filter top-level source secret data fields for inclusion in the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied last. #[serde(default, skip_serializing_if = "Option::is_none")] pub includes: Option>, - /// Resync the Secret on updates to any configured TransformationRefs. - pub resync: bool, /// Templates maps a template name to its Template. 
Templates are always included in the rendered K8s Secret, and take precedence over templates defined in a SecretTransformation. #[serde(default, skip_serializing_if = "Option::is_none")] pub templates: Option>, diff --git a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultpkisecrets.rs b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultpkisecrets.rs index 022c57f2a..37bdb3d94 100644 --- a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultpkisecrets.rs +++ b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultpkisecrets.rs @@ -67,6 +67,9 @@ pub struct VaultPKISecretSpec { /// The requested URI SANs. #[serde(default, skip_serializing_if = "Option::is_none", rename = "uriSans")] pub uri_sans: Option>, + /// User ID (OID 0.9.2342.19200300.100.1.1) Subject values to be placed on the signed certificate. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "userIDs")] + pub user_i_ds: Option>, /// VaultAuthRef to the VaultAuth resource, can be prefixed with a namespace, eg: `namespaceA/vaultAuthRefB`. If no namespace prefix is provided it will default to namespace of the VaultAuth CR. If no value is specified for VaultAuthRef the Operator will default to the `default` VaultAuth, configured in the operator's namespace. #[serde(default, skip_serializing_if = "Option::is_none", rename = "vaultAuthRef")] pub vault_auth_ref: Option, @@ -99,16 +102,14 @@ pub struct VaultPKISecretDestination { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct VaultPKISecretDestinationTransformation { /// ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration. - #[serde(rename = "excludeRaw")] - pub exclude_raw: bool, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeRaw")] + pub exclude_raw: Option, /// Excludes contains regex patterns used to filter top-level source secret data fields for exclusion from the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied before any inclusion patterns. To exclude all source secret data fields, you can configure the single pattern ".*". #[serde(default, skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// Includes contains regex patterns used to filter top-level source secret data fields for inclusion in the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied last. #[serde(default, skip_serializing_if = "Option::is_none")] pub includes: Option>, - /// Resync the Secret on updates to any configured TransformationRefs. - pub resync: bool, /// Templates maps a template name to its Template. Templates are always included in the rendered K8s Secret, and take precedence over templates defined in a SecretTransformation. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub templates: Option>, diff --git a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultstaticsecrets.rs b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultstaticsecrets.rs index 93f58122b..609f25e39 100644 --- a/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultstaticsecrets.rs +++ b/kube-custom-resources-rs/src/secrets_hashicorp_com/v1beta1/vaultstaticsecrets.rs @@ -69,16 +69,14 @@ pub struct VaultStaticSecretDestination { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct VaultStaticSecretDestinationTransformation { /// ExcludeRaw data from the destination Secret. Exclusion policy can be set globally by including 'exclude-raw` in the '--global-transformation-options' command line flag. If set, the command line flag always takes precedence over this configuration. - #[serde(rename = "excludeRaw")] - pub exclude_raw: bool, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "excludeRaw")] + pub exclude_raw: Option, /// Excludes contains regex patterns used to filter top-level source secret data fields for exclusion from the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied before any inclusion patterns. To exclude all source secret data fields, you can configure the single pattern ".*". #[serde(default, skip_serializing_if = "Option::is_none")] pub excludes: Option>, /// Includes contains regex patterns used to filter top-level source secret data fields for inclusion in the final K8s Secret data. These pattern filters are never applied to templated fields as defined in Templates. They are always applied last. #[serde(default, skip_serializing_if = "Option::is_none")] pub includes: Option>, - /// Resync the Secret on updates to any configured TransformationRefs. - pub resync: bool, /// Templates maps a template name to its Template. Templates are always included in the rendered K8s Secret, and take precedence over templates defined in a SecretTransformation. #[serde(default, skip_serializing_if = "Option::is_none")] pub templates: Option>, diff --git a/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/activities.rs b/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/activities.rs index 0cb10ac1f..9288dae79 100644 --- a/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/activities.rs +++ b/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/activities.rs @@ -12,25 +12,60 @@ use serde::{Serialize, Deserialize}; #[kube(status = "ActivityStatus")] #[kube(schema = "disabled")] pub struct ActivitySpec { - /// The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see Limits Related to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) in the AWS Step Functions Developer Guide. - /// A name must not contain: - /// * white space - /// * brackets < > { } [ ] - /// * wildcard characters ? * - /// * special characters " # % \ ^ | ~ ` $ & , ; : / - /// * control characters (U+0000-001F, U+007F-009F) - /// To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. + /// The name of the activity to create. This name must be unique for your AWS + /// account and region for 90 days. 
For more information, see Limits Related + /// to State Machine Executions (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) + /// in the AWS Step Functions Developer Guide. + /// + /// + /// A name must not contain: + /// + /// + /// * white space + /// + /// + /// * brackets < > { } [ ] + /// + /// + /// * wildcard characters ? * + /// + /// + /// * special characters " # % \ ^ | ~ ` $ & , ; : / + /// + /// + /// * control characters (U+0000-001F, U+007F-009F) + /// + /// + /// To enable logging with CloudWatch Logs, the name should only contain 0-9, + /// A-Z, a-z, - and _. pub name: String, - /// The list of tags to add to a resource. - /// An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). - /// Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @. + /// The list of tags to add to a resource. + /// + /// + /// An array of key-value pairs. For more information, see Using Cost Allocation + /// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + /// in the AWS Billing and Cost Management User Guide, and Controlling Access + /// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). + /// + /// + /// Tags may only contain Unicode letters, digits, white space, or these symbols: + /// _ . : / = + - @. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, } -/// Tags are key-value pairs that can be associated with Step Functions state machines and activities. -/// An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). -/// Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @. +/// Tags are key-value pairs that can be associated with Step Functions state +/// machines and activities. +/// +/// +/// An array of key-value pairs. For more information, see Using Cost Allocation +/// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +/// in the AWS Billing and Cost Management User Guide, and Controlling Access +/// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). +/// +/// +/// Tags may only contain Unicode letters, digits, white space, or these symbols: +/// _ . : / = + - @. 
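The reflowed doc comments above spell out the naming and tagging constraints for Step Functions activities. A minimal sketch (not part of the patch) of a spec that satisfies them follows; the module path, the enabled crate feature, and the use of `serde_yaml` are assumptions, and the name and tag values are made up.

```rust
// Hedged sketch: the import path mirrors the patched file's location. The
// activity name avoids spaces, brackets, wildcards and special characters,
// so it also stays CloudWatch-Logs friendly (0-9, A-Z, a-z, - and _ only).
use kube_custom_resources_rs::sfn_services_k8s_aws::v1alpha1::activities::ActivitySpec;

fn main() -> Result<(), serde_yaml::Error> {
    let spec: ActivitySpec = serde_yaml::from_str(
        r#"
name: order-processing_activity
tags:
  - key: team
    value: payments
"#,
    )?;
    assert_eq!(spec.name, "order-processing_activity");
    Ok(())
}
```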
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ActivityTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -42,10 +77,15 @@ pub struct ActivityTags { /// ActivityStatus defines the observed state of Activity #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ActivityStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date the activity is created. @@ -53,20 +93,32 @@ pub struct ActivityStatus { pub creation_date: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ActivityStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. #[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. 
pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct ActivityStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/statemachines.rs b/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/statemachines.rs index 1ba89fbb8..ba5dff3e0 100644 --- a/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/statemachines.rs +++ b/kube-custom-resources-rs/src/sfn_services_k8s_aws/v1alpha1/statemachines.rs @@ -12,39 +12,73 @@ use serde::{Serialize, Deserialize}; #[kube(status = "StateMachineStatus")] #[kube(schema = "disabled")] pub struct StateMachineSpec { - /// The Amazon States Language definition of the state machine. See Amazon States Language (https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html). + /// The Amazon States Language definition of the state machine. See Amazon States + /// Language (https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html). pub definition: String, - /// Defines what execution history events are logged and where they are logged. - /// By default, the level is set to OFF. For more information see Log Levels (https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide. + /// Defines what execution history events are logged and where they are logged. + /// + /// + /// By default, the level is set to OFF. For more information see Log Levels + /// (https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) + /// in the AWS Step Functions User Guide. #[serde(default, skip_serializing_if = "Option::is_none", rename = "loggingConfiguration")] pub logging_configuration: Option, - /// The name of the state machine. - /// A name must not contain: - /// * white space - /// * brackets < > { } [ ] - /// * wildcard characters ? * - /// * special characters " # % \ ^ | ~ ` $ & , ; : / - /// * control characters (U+0000-001F, U+007F-009F) - /// To enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. + /// The name of the state machine. + /// + /// + /// A name must not contain: + /// + /// + /// * white space + /// + /// + /// * brackets < > { } [ ] + /// + /// + /// * wildcard characters ? * + /// + /// + /// * special characters " # % \ ^ | ~ ` $ & , ; : / + /// + /// + /// * control characters (U+0000-001F, U+007F-009F) + /// + /// + /// To enable logging with CloudWatch Logs, the name should only contain 0-9, + /// A-Z, a-z, - and _. pub name: String, /// The Amazon Resource Name (ARN) of the IAM role to use for this state machine. #[serde(rename = "roleARN")] pub role_arn: String, - /// Tags to be added when creating a state machine. - /// An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). 
- /// Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @. + /// Tags to be added when creating a state machine. + /// + /// + /// An array of key-value pairs. For more information, see Using Cost Allocation + /// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) + /// in the AWS Billing and Cost Management User Guide, and Controlling Access + /// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). + /// + /// + /// Tags may only contain Unicode letters, digits, white space, or these symbols: + /// _ . : / = + - @. #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option>, /// Selects whether AWS X-Ray tracing is enabled. #[serde(default, skip_serializing_if = "Option::is_none", rename = "tracingConfiguration")] pub tracing_configuration: Option, - /// Determines whether a Standard or Express state machine is created. The default is STANDARD. You cannot update the type of a state machine once it has been created. + /// Determines whether a Standard or Express state machine is created. The default + /// is STANDARD. You cannot update the type of a state machine once it has been + /// created. #[serde(default, skip_serializing_if = "Option::is_none", rename = "type_")] pub r#type: Option, } -/// Defines what execution history events are logged and where they are logged. -/// By default, the level is set to OFF. For more information see Log Levels (https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide. +/// Defines what execution history events are logged and where they are logged. +/// +/// +/// By default, the level is set to OFF. For more information see Log Levels +/// (https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) +/// in the AWS Step Functions User Guide. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StateMachineLoggingConfiguration { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -67,9 +101,18 @@ pub struct StateMachineLoggingConfigurationDestinationsCloudWatchLogsLogGroup { pub log_group_arn: Option, } -/// Tags are key-value pairs that can be associated with Step Functions state machines and activities. -/// An array of key-value pairs. For more information, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide, and Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). -/// Tags may only contain Unicode letters, digits, white space, or these symbols: _ . : / = + - @. +/// Tags are key-value pairs that can be associated with Step Functions state +/// machines and activities. +/// +/// +/// An array of key-value pairs. For more information, see Using Cost Allocation +/// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +/// in the AWS Billing and Cost Management User Guide, and Controlling Access +/// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). +/// +/// +/// Tags may only contain Unicode letters, digits, white space, or these symbols: +/// _ . : / = + - @. 
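A minimal sketch (not part of the patch) of the three required StateMachineSpec fields shown in this hunk, with a trivial Amazon States Language definition. The module path, the IAM role name, and the use of `serde_yaml` are assumptions for illustration only.

```rust
// Hedged sketch: `definition`, `name` and `roleARN` are the required fields
// visible in this hunk; logging, tracing and `type_` fall back to their
// serde defaults (None). The role ARN below is hypothetical.
use kube_custom_resources_rs::sfn_services_k8s_aws::v1alpha1::statemachines::StateMachineSpec;

fn main() -> Result<(), serde_yaml::Error> {
    let spec: StateMachineSpec = serde_yaml::from_str(
        r#"
name: order-workflow
roleARN: arn:aws:iam::123456789012:role/hypothetical-sfn-role
definition: |
  {"StartAt": "Done", "States": {"Done": {"Type": "Succeed"}}}
"#,
    )?;
    assert!(spec.logging_configuration.is_none());
    assert!(spec.tracing_configuration.is_none());
    Ok(())
}
```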
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StateMachineTags { #[serde(default, skip_serializing_if = "Option::is_none")] @@ -88,10 +131,15 @@ pub struct StateMachineTracingConfiguration { /// StateMachineStatus defines the observed state of StateMachine #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StateMachineStatus { - /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource + /// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member + /// that is used to contain resource sync state, account ownership, + /// constructed ARN for the resource #[serde(default, skip_serializing_if = "Option::is_none", rename = "ackResourceMetadata")] pub ack_resource_metadata: Option, - /// All CRS managed by ACK have a common `Status.Conditions` member that contains a collection of `ackv1alpha1.Condition` objects that describe the various terminal states of the CR and its backend AWS service API resource + /// All CRS managed by ACK have a common `Status.Conditions` member that + /// contains a collection of `ackv1alpha1.Condition` objects that describe + /// the various terminal states of the CR and its backend AWS service API + /// resource #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, /// The date the state machine is created. @@ -99,20 +147,32 @@ pub struct StateMachineStatus { pub creation_date: Option, } -/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member that is used to contain resource sync state, account ownership, constructed ARN for the resource +/// All CRs managed by ACK have a common `Status.ACKResourceMetadata` member +/// that is used to contain resource sync state, account ownership, +/// constructed ARN for the resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StateMachineStatusAckResourceMetadata { - /// ARN is the Amazon Resource Name for the resource. This is a globally-unique identifier and is set only by the ACK service controller once the controller has orchestrated the creation of the resource OR when it has verified that an "adopted" resource (a resource where the ARN annotation was set by the Kubernetes user on the CR) exists and matches the supplied CR's Spec field values. TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse https://github.com/aws/aws-controllers-k8s/issues/270 + /// ARN is the Amazon Resource Name for the resource. This is a + /// globally-unique identifier and is set only by the ACK service controller + /// once the controller has orchestrated the creation of the resource OR + /// when it has verified that an "adopted" resource (a resource where the + /// ARN annotation was set by the Kubernetes user on the CR) exists and + /// matches the supplied CR's Spec field values. + /// TODO(vijat@): Find a better strategy for resources that do not have ARN in CreateOutputResponse + /// https://github.com/aws/aws-controllers-k8s/issues/270 #[serde(default, skip_serializing_if = "Option::is_none")] pub arn: Option, - /// OwnerAccountID is the AWS Account ID of the account that owns the backend AWS service API resource. + /// OwnerAccountID is the AWS Account ID of the account that owns the + /// backend AWS service API resource. 
#[serde(rename = "ownerAccountID")] pub owner_account_id: String, /// Region is the AWS region in which the resource exists or will exist. pub region: String, } -/// Condition is the common struct used by all CRDs managed by ACK service controllers to indicate terminal states of the CR and its backend AWS service API resource +/// Condition is the common struct used by all CRDs managed by ACK service +/// controllers to indicate terminal states of the CR and its backend AWS +/// service API resource #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct StateMachineStatusConditions { /// Last time the condition transitioned from one status to another. diff --git a/kube-custom-resources-rs/src/snapscheduler_backube/v1/snapshotschedules.rs b/kube-custom-resources-rs/src/snapscheduler_backube/v1/snapshotschedules.rs index 92ff90286..33023b093 100644 --- a/kube-custom-resources-rs/src/snapscheduler_backube/v1/snapshotschedules.rs +++ b/kube-custom-resources-rs/src/snapscheduler_backube/v1/snapshotschedules.rs @@ -22,7 +22,8 @@ pub struct SnapshotScheduleSpec { /// Retention determines how long this schedule's snapshots will be kept. #[serde(default, skip_serializing_if = "Option::is_none")] pub retention: Option, - /// Schedule is a Cronspec specifying when snapshots should be taken. See https://en.wikipedia.org/wiki/Cron for a description of the format. + /// Schedule is a Cronspec specifying when snapshots should be taken. See + /// https://en.wikipedia.org/wiki/Cron for a description of the format. #[serde(default, skip_serializing_if = "Option::is_none")] pub schedule: Option, /// A template to customize the Snapshots. @@ -36,19 +37,26 @@ pub struct SnapshotScheduleClaimSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SnapshotScheduleClaimSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -56,7 +64,8 @@ pub struct SnapshotScheduleClaimSelectorMatchExpressions { /// Retention determines how long this schedule's snapshots will be kept. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SnapshotScheduleRetention { - /// The length of time (time.Duration) after which a given Snapshot will be deleted. + /// The length of time (time.Duration) after which a given Snapshot will be + /// deleted. #[serde(default, skip_serializing_if = "Option::is_none")] pub expires: Option, /// The maximum number of snapshots to retain per PVC @@ -67,7 +76,8 @@ pub struct SnapshotScheduleRetention { /// A template to customize the Snapshots. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SnapshotScheduleSnapshotTemplate { - /// A list of labels that should be added to each Snapshot created by this schedule. + /// A list of labels that should be added to each Snapshot created by this + /// schedule. #[serde(default, skip_serializing_if = "Option::is_none")] pub labels: Option>, /// The name of the VolumeSnapshotClass to be used when creating Snapshots. @@ -89,7 +99,8 @@ pub struct SnapshotScheduleStatus { pub next_snapshot_time: Option, } -/// Condition represents the state of the operator's reconciliation functionality. +/// Condition represents the state of the operator's +/// reconciliation functionality. #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct SnapshotScheduleStatusConditions { #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastHeartbeatTime")] diff --git a/kube-custom-resources-rs/src/trust_cert_manager_io/v1alpha1/bundles.rs b/kube-custom-resources-rs/src/trust_cert_manager_io/v1alpha1/bundles.rs index 33121ac2c..36d76e3f1 100644 --- a/kube-custom-resources-rs/src/trust_cert_manager_io/v1alpha1/bundles.rs +++ b/kube-custom-resources-rs/src/trust_cert_manager_io/v1alpha1/bundles.rs @@ -18,91 +18,123 @@ pub struct BundleSpec { pub target: BundleTarget, } -/// BundleSource is the set of sources whose data will be appended and synced to the BundleTarget in all Namespaces. +/// BundleSource is the set of sources whose data will be appended and synced to +/// the BundleTarget in all Namespaces. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSources { - /// ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a list of ConfigMap's `data` key using label selector, in the trust Namespace. + /// ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a + /// list of ConfigMap's `data` key using label selector, in the trust Namespace. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMap")] pub config_map: Option, /// InLine is a simple string to append as the source data. #[serde(default, skip_serializing_if = "Option::is_none", rename = "inLine")] pub in_line: Option, - /// Secret is a reference (by name) to a Secret's `data` key, or to a list of Secret's `data` key using label selector, in the trust Namespace. 
+ /// Secret is a reference (by name) to a Secret's `data` key, or to a + /// list of Secret's `data` key using label selector, in the trust Namespace. #[serde(default, skip_serializing_if = "Option::is_none")] pub secret: Option, - /// UseDefaultCAs, when true, requests the default CA bundle to be used as a source. Default CAs are available if trust-manager was installed via Helm or was otherwise set up to include a package-injecting init container by using the "--default-package-location" flag when starting the trust-manager controller. If default CAs were not configured at start-up, any request to use the default CAs will fail. The version of the default CA package which is used for a Bundle is stored in the defaultCAPackageVersion field of the Bundle's status field. + /// UseDefaultCAs, when true, requests the default CA bundle to be used as a source. + /// Default CAs are available if trust-manager was installed via Helm + /// or was otherwise set up to include a package-injecting init container by using the + /// "--default-package-location" flag when starting the trust-manager controller. + /// If default CAs were not configured at start-up, any request to use the default + /// CAs will fail. + /// The version of the default CA package which is used for a Bundle is stored in the + /// defaultCAPackageVersion field of the Bundle's status field. #[serde(default, skip_serializing_if = "Option::is_none", rename = "useDefaultCAs")] pub use_default_c_as: Option, } -/// ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a list of ConfigMap's `data` key using label selector, in the trust Namespace. +/// ConfigMap is a reference (by name) to a ConfigMap's `data` key, or to a +/// list of ConfigMap's `data` key using label selector, in the trust Namespace. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesConfigMap { /// Key is the key of the entry in the object's `data` field to be used. pub key: String, - /// Name is the name of the source object in the trust Namespace. This field must be left empty when `selector` is set + /// Name is the name of the source object in the trust Namespace. + /// This field must be left empty when `selector` is set #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set. + /// Selector is the label selector to use to fetch a list of objects. Must not be set + /// when `Name` is set. #[serde(default, skip_serializing_if = "Option::is_none")] pub selector: Option, } -/// Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set. +/// Selector is the label selector to use to fetch a list of objects. Must not be set +/// when `Name` is set. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesConfigMapSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + /// matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesConfigMapSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } -/// Secret is a reference (by name) to a Secret's `data` key, or to a list of Secret's `data` key using label selector, in the trust Namespace. +/// Secret is a reference (by name) to a Secret's `data` key, or to a +/// list of Secret's `data` key using label selector, in the trust Namespace. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesSecret { /// Key is the key of the entry in the object's `data` field to be used. pub key: String, - /// Name is the name of the source object in the trust Namespace. This field must be left empty when `selector` is set + /// Name is the name of the source object in the trust Namespace. + /// This field must be left empty when `selector` is set #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option, - /// Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set. + /// Selector is the label selector to use to fetch a list of objects. Must not be set + /// when `Name` is set. #[serde(default, skip_serializing_if = "Option::is_none")] pub selector: Option, } -/// Selector is the label selector to use to fetch a list of objects. Must not be set when `Name` is set. +/// Selector is the label selector to use to fetch a list of objects. Must not be set +/// when `Name` is set. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesSecretSelector { /// matchExpressions is a list of label selector requirements. The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchExpressions")] pub match_expressions: Option>, - /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ /// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + /// map is equivalent to an element of matchExpressions, whose key field is "key", the + /// operator is "In", and the values array contains only "value". The requirements are ANDed. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. +/// A label selector requirement is a selector that contains values, a key, and an operator that +/// relates the key and values. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleSourcesSecretSelectorMatchExpressions { /// key is the label key that the selector applies to. pub key: String, - /// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + /// operator represents a key's relationship to a set of values. + /// Valid operators are In, NotIn, Exists and DoesNotExist. pub operator: String, - /// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + /// values is an array of string values. If the operator is In or NotIn, + /// the values array must be non-empty. If the operator is Exists or DoesNotExist, + /// the values array must be empty. This array is replaced during a strategic + /// merge patch. #[serde(default, skip_serializing_if = "Option::is_none")] pub values: Option>, } @@ -113,13 +145,17 @@ pub struct BundleTarget { /// AdditionalFormats specifies any additional formats to write to the target #[serde(default, skip_serializing_if = "Option::is_none", rename = "additionalFormats")] pub additional_formats: Option, - /// ConfigMap is the target ConfigMap in Namespaces that all Bundle source data will be synced to. + /// ConfigMap is the target ConfigMap in Namespaces that all Bundle source + /// data will be synced to. #[serde(default, skip_serializing_if = "Option::is_none", rename = "configMap")] pub config_map: Option, - /// NamespaceSelector will, if set, only sync the target resource in Namespaces which match the selector. + /// NamespaceSelector will, if set, only sync the target resource in + /// Namespaces which match the selector. #[serde(default, skip_serializing_if = "Option::is_none", rename = "namespaceSelector")] pub namespace_selector: Option, - /// Secret is the target Secret that all Bundle source data will be synced to. Using Secrets as targets is only supported if enabled at trust-manager startup. By default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace. + /// Secret is the target Secret that all Bundle source data will be synced to. + /// Using Secrets as targets is only supported if enabled at trust-manager startup. + /// By default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace. #[serde(default, skip_serializing_if = "Option::is_none")] pub secret: Option, } @@ -127,15 +163,20 @@ pub struct BundleTarget { /// AdditionalFormats specifies any additional formats to write to the target #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetAdditionalFormats { - /// JKS requests a JKS-formatted binary trust bundle to be written to the target. 
The bundle has "changeit" as the default password. For more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords + /// JKS requests a JKS-formatted binary trust bundle to be written to the target. + /// The bundle has "changeit" as the default password. + /// For more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords #[serde(default, skip_serializing_if = "Option::is_none")] pub jks: Option, - /// PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target. The bundle is by default created without a password. + /// PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target. + /// The bundle is by default created without a password. #[serde(default, skip_serializing_if = "Option::is_none")] pub pkcs12: Option, } -/// JKS requests a JKS-formatted binary trust bundle to be written to the target. The bundle has "changeit" as the default password. For more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords +/// JKS requests a JKS-formatted binary trust bundle to be written to the target. +/// The bundle has "changeit" as the default password. +/// For more information refer to this link https://cert-manager.io/docs/faq/#keystore-passwords #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetAdditionalFormatsJks { /// Key is the key of the entry in the object's `data` field to be used. @@ -145,7 +186,8 @@ pub struct BundleTargetAdditionalFormatsJks { pub password: Option, } -/// PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target. The bundle is by default created without a password. +/// PKCS12 requests a PKCS12-formatted binary trust bundle to be written to the target. +/// The bundle is by default created without a password. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetAdditionalFormatsPkcs12 { /// Key is the key of the entry in the object's `data` field to be used. @@ -155,22 +197,27 @@ pub struct BundleTargetAdditionalFormatsPkcs12 { pub password: Option, } -/// ConfigMap is the target ConfigMap in Namespaces that all Bundle source data will be synced to. +/// ConfigMap is the target ConfigMap in Namespaces that all Bundle source +/// data will be synced to. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetConfigMap { /// Key is the key of the entry in the object's `data` field to be used. pub key: String, } -/// NamespaceSelector will, if set, only sync the target resource in Namespaces which match the selector. +/// NamespaceSelector will, if set, only sync the target resource in +/// Namespaces which match the selector. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetNamespaceSelector { - /// MatchLabels matches on the set of labels that must be present on a Namespace for the Bundle target to be synced there. + /// MatchLabels matches on the set of labels that must be present on a + /// Namespace for the Bundle target to be synced there. #[serde(default, skip_serializing_if = "Option::is_none", rename = "matchLabels")] pub match_labels: Option>, } -/// Secret is the target Secret that all Bundle source data will be synced to. Using Secrets as targets is only supported if enabled at trust-manager startup. By default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace. 
+/// Secret is the target Secret that all Bundle source data will be synced to. +/// Using Secrets as targets is only supported if enabled at trust-manager startup. +/// By default, trust-manager has no permissions for writing to secrets and can only read secrets in the trust namespace. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleTargetSecret { /// Key is the key of the entry in the object's `data` field to be used. @@ -180,10 +227,14 @@ pub struct BundleTargetSecret { /// Status of the Bundle. This is set and managed automatically. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleStatus { - /// List of status conditions to indicate the status of the Bundle. Known condition types are `Bundle`. + /// List of status conditions to indicate the status of the Bundle. + /// Known condition types are `Bundle`. #[serde(default, skip_serializing_if = "Option::is_none")] pub conditions: Option>, - /// DefaultCAPackageVersion, if set and non-empty, indicates the version information which was retrieved when the set of default CAs was requested in the bundle source. This should only be set if useDefaultCAs was set to "true" on a source, and will be the same for the same version of a bundle with identical certificates. + /// DefaultCAPackageVersion, if set and non-empty, indicates the version information + /// which was retrieved when the set of default CAs was requested in the bundle + /// source. This should only be set if useDefaultCAs was set to "true" on a source, + /// and will be the same for the same version of a bundle with identical certificates. #[serde(default, skip_serializing_if = "Option::is_none", rename = "defaultCAVersion")] pub default_ca_version: Option, } @@ -191,16 +242,25 @@ pub struct BundleStatus { /// BundleCondition contains condition information for a Bundle. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct BundleStatusConditions { - /// LastTransitionTime is the timestamp corresponding to the last status change of this condition. + /// LastTransitionTime is the timestamp corresponding to the last status + /// change of this condition. #[serde(rename = "lastTransitionTime")] pub last_transition_time: String, - /// Message is a human-readable description of the details of the last transition, complementing reason. + /// Message is a human-readable description of the details of the last + /// transition, complementing reason. #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option, - /// If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Bundle. + /// If set, this represents the .metadata.generation that the condition was + /// set based upon. + /// For instance, if .metadata.generation is currently 12, but the + /// .status.condition[x].observedGeneration is 9, the condition is out of date + /// with respect to the current state of the Bundle. #[serde(default, skip_serializing_if = "Option::is_none", rename = "observedGeneration")] pub observed_generation: Option, - /// Reason is a brief machine-readable explanation for the condition's last transition. The value should be a CamelCase string. This field may not be empty. + /// Reason is a brief machine-readable explanation for the condition's last + /// transition. + /// The value should be a CamelCase string. 
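A minimal sketch (not part of the patch) exercising the reflowed BundleTarget fields above: a ConfigMap key, a JKS additional format (which uses the documented "changeit" default when no password is given), and a namespace selector. The module path, label names, and `serde_yaml` usage are assumptions.

```rust
// Hedged sketch: all fields used here are optional members of BundleTarget
// shown in this diff; the label key/value pair is made up for illustration.
use kube_custom_resources_rs::trust_cert_manager_io::v1alpha1::bundles::BundleTarget;

fn main() -> Result<(), serde_yaml::Error> {
    let target: BundleTarget = serde_yaml::from_str(
        r#"
configMap:
  key: trust-bundle.pem
additionalFormats:
  jks:
    key: trust-bundle.jks
namespaceSelector:
  matchLabels:
    trust: enabled
"#,
    )?;
    // Secret targets stay unset; they require opting in at trust-manager startup.
    assert!(target.secret.is_none());
    Ok(())
}
```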
+ /// This field may not be empty. pub reason: String, /// Status of the condition, one of True, False, Unknown. pub status: BundleStatusConditionsStatus, diff --git a/kube-custom-resources-rs/src/wildfly_org/v1alpha1/wildflyservers.rs b/kube-custom-resources-rs/src/wildfly_org/v1alpha1/wildflyservers.rs index c8449c392..d48e3ecf7 100644 --- a/kube-custom-resources-rs/src/wildfly_org/v1alpha1/wildflyservers.rs +++ b/kube-custom-resources-rs/src/wildfly_org/v1alpha1/wildflyservers.rs @@ -32,6 +32,12 @@ pub struct WildFlyServerSpec { /// EnvFrom contains environment variables from a source such as a ConfigMap or a Secret #[serde(default, skip_serializing_if = "Option::is_none", rename = "envFrom")] pub env_from: Option>, + /// LivenessProbe defines the periodic probe of container liveness. Container will be restarted if the probe fails. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "livenessProbe")] + pub liveness_probe: Option, + /// ReadinessProbe defines the periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "readinessProbe")] + pub readiness_probe: Option, /// Replicas is the desired number of replicas for the application pub replicas: i32, /// ResourcesSpec defines the resources used by the WildFlyServer, ie CPU and memory, use limits and requests. More info: https://pkg.go.dev/k8s.io/api@v0.18.14/core/v1#ResourceRequirements @@ -51,6 +57,9 @@ pub struct WildFlyServerSpec { /// StandaloneConfigMapSpec defines the desired configMap configuration to obtain the standalone configuration for WildFlyServer #[serde(default, skip_serializing_if = "Option::is_none", rename = "standaloneConfigMap")] pub standalone_config_map: Option, + /// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "startupProbe")] + pub startup_probe: Option, /// StorageSpec defines specific storage required for the server own data directory. If omitted, an EmptyDir is used (that will not persist data across pod restart). #[serde(default, skip_serializing_if = "Option::is_none")] pub storage: Option, @@ -172,6 +181,130 @@ pub struct WildFlyServerEnvFromSecretRef { pub optional: Option, } +/// LivenessProbe defines the periodic probe of container liveness. Container will be restarted if the probe fails. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerLivenessProbe { + /// Exec specifies a command action to take. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub exec: Option, + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] + pub failure_threshold: Option, + /// HTTPGet specifies the http request to perform. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] + pub http_get: Option, + /// Number of seconds after the container has started before probes are initiated. 
It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] + pub initial_delay_seconds: Option, + /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] + pub period_seconds: Option, + /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] + pub success_threshold: Option, + /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] + pub timeout_seconds: Option, +} + +/// Exec specifies a command action to take. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerLivenessProbeExec { + /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub command: Option>, +} + +/// HTTPGet specifies the http request to perform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerLivenessProbeHttpGet { + /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub host: Option, + /// Custom headers to set in the request. HTTP allows repeated headers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpHeaders")] + pub http_headers: Option>, + /// Path to access on the HTTP server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, + /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + pub port: IntOrString, + /// Scheme to use for connecting to the host. Defaults to HTTP. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub scheme: Option, +} + +/// HTTPHeader describes a custom header to be used in HTTP probes +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerLivenessProbeHttpGetHttpHeaders { + /// The header field name + pub name: String, + /// The header field value + pub value: String, +} + +/// ReadinessProbe defines the periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerReadinessProbe { + /// Exec specifies a command action to take. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub exec: Option, + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. 
+ #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] + pub failure_threshold: Option, + /// HTTPGet specifies the http request to perform. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] + pub http_get: Option, + /// Number of seconds after the container has started before probes are initiated. It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] + pub initial_delay_seconds: Option, + /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] + pub period_seconds: Option, + /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] + pub success_threshold: Option, + /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] + pub timeout_seconds: Option, +} + +/// Exec specifies a command action to take. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerReadinessProbeExec { + /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub command: Option>, +} + +/// HTTPGet specifies the http request to perform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerReadinessProbeHttpGet { + /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub host: Option, + /// Custom headers to set in the request. HTTP allows repeated headers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpHeaders")] + pub http_headers: Option>, + /// Path to access on the HTTP server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, + /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + pub port: IntOrString, + /// Scheme to use for connecting to the host. Defaults to HTTP. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub scheme: Option, +} + +/// HTTPHeader describes a custom header to be used in HTTP probes +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerReadinessProbeHttpGetHttpHeaders { + /// The header field name + pub name: String, + /// The header field value + pub value: String, +} + /// ResourcesSpec defines the resources used by the WildFlyServer, ie CPU and memory, use limits and requests. 
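A minimal sketch (not part of the patch) of the new probe types added to WildFlyServerSpec, showing an HTTP liveness probe. The module path, the port number, and the use of `serde_yaml` are assumptions; `port` accepts either a number or an IANA_SVC_NAME because it is an IntOrString.

```rust
// Hedged sketch: only fields visible in the new WildFlyServerLivenessProbe
// struct are used; everything omitted falls back to None via serde defaults.
use kube_custom_resources_rs::wildfly_org::v1alpha1::wildflyservers::WildFlyServerLivenessProbe;

fn main() -> Result<(), serde_yaml::Error> {
    let probe: WildFlyServerLivenessProbe = serde_yaml::from_str(
        r#"
httpGet:
  path: /health/live
  port: 9990
periodSeconds: 10
failureThreshold: 6
"#,
    )?;
    assert!(probe.exec.is_none());
    Ok(())
}
```

The readiness and startup probe structs in this diff have the same shape, so the same YAML fragment (under `readinessProbe` or `startupProbe`) deserializes into their respective types.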
More info: https://pkg.go.dev/k8s.io/api@v0.18.14/core/v1#ResourceRequirements #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WildFlyServerResources { @@ -287,6 +420,68 @@ pub struct WildFlyServerStandaloneConfigMap { pub name: String, } +/// StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerStartupProbe { + /// Exec specifies a command action to take. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub exec: Option, + /// Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "failureThreshold")] + pub failure_threshold: Option, + /// HTTPGet specifies the http request to perform. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpGet")] + pub http_get: Option, + /// Number of seconds after the container has started before probes are initiated. It defaults to 60 seconds for liveness probe. It defaults to 10 seconds for readiness probe. It defaults to 0 seconds for startup probe. Minimum value is 0. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "initialDelaySeconds")] + pub initial_delay_seconds: Option, + /// How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "periodSeconds")] + pub period_seconds: Option, + /// Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "successThreshold")] + pub success_threshold: Option, + /// Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + #[serde(default, skip_serializing_if = "Option::is_none", rename = "timeoutSeconds")] + pub timeout_seconds: Option, +} + +/// Exec specifies a command action to take. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerStartupProbeExec { + /// Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub command: Option>, +} + +/// HTTPGet specifies the http request to perform. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerStartupProbeHttpGet { + /// Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub host: Option, + /// Custom headers to set in the request. 
HTTP allows repeated headers. + #[serde(default, skip_serializing_if = "Option::is_none", rename = "httpHeaders")] + pub http_headers: Option>, + /// Path to access on the HTTP server. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub path: Option, + /// Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + pub port: IntOrString, + /// Scheme to use for connecting to the host. Defaults to HTTP. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub scheme: Option, +} + +/// HTTPHeader describes a custom header to be used in HTTP probes +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct WildFlyServerStartupProbeHttpGetHttpHeaders { + /// The header field name + pub name: String, + /// The header field value + pub value: String, +} + /// StorageSpec defines specific storage required for the server own data directory. If omitted, an EmptyDir is used (that will not persist data across pod restart). #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct WildFlyServerStorage {