diff --git a/Makefile b/Makefile index c466cc87ba73..4cb742ec41cf 100644 --- a/Makefile +++ b/Makefile @@ -631,7 +631,7 @@ pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(TYPES) dist/kubernetes.swagger.json: @mkdir -p dist - ./hack/recurl.sh dist/kubernetes.swagger.json https://raw.githubusercontent.com/kubernetes/kubernetes/v1.23.3/api/openapi-spec/swagger.json + ./hack/recurl.sh dist/kubernetes.swagger.json https://raw.githubusercontent.com/kubernetes/kubernetes/v1.27.12/api/openapi-spec/swagger.json pkg/apiclient/_.secondary.swagger.json: hack/api/swagger/secondaryswaggergen.go pkg/apis/workflow/v1alpha1/openapi_generated.go dist/kubernetes.swagger.json rm -Rf v3 vendor diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json index d96eb25b7bc3..00f9cd47caa4 100644 --- a/api/jsonschema/schema.json +++ b/api/jsonschema/schema.json @@ -4709,6 +4709,14 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe", "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" @@ -6725,6 +6733,14 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe", "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" @@ -7335,6 +7351,14 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe", "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" @@ -8376,19 +8400,19 @@ "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "string" }, "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", "type": "integer" }, "readOnly": { - "description": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "boolean" }, "volumeID": { - "description": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "string" } }, @@ -8419,27 +8443,27 @@ "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", "properties": { "cachingMode": { - "description": "Host Caching mode: None, Read Only, Read Write.", + "description": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", "type": "string" }, "diskName": { - "description": "The Name of the data disk in the blob storage", + "description": "diskName is the Name of the data disk in the blob storage", "type": "string" }, "diskURI": { - "description": "The URI the data disk in the blob storage", + "description": "diskURI is the URI of data disk in the blob storage", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "kind": { - "description": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "description": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" } }, @@ -8453,15 +8477,15 @@ "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "properties": { "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretName": { - "description": "the name of secret that contains Azure Storage Account Name and Key", + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", "type": "string" }, "shareName": { - "description": "Share Name", + "description": "shareName is the azure share Name", "type": "string" } }, @@ -8475,26 +8499,26 @@ "description": "Represents a source location of a volume to mount, managed by an external CSI driver", "properties": { "driver": { - "description": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "description": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "description": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", "type": "string" }, "nodePublishSecretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." }, "readOnly": { - "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + "description": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", "type": "boolean" }, "volumeAttributes": { "additionalProperties": { "type": "string" }, - "description": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.", "type": "object" } }, @@ -8527,30 +8551,30 @@ "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", "properties": { "monitors": { - "description": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "items": { "type": "string" }, "type": "array" }, "path": { - "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", "type": "string" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "boolean" }, "secretFile": { - "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "string" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" }, "user": { - "description": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "string" } }, @@ -8563,19 +8587,19 @@ "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "string" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "Optional: points to a secret object containing parameters used to connect to OpenStack." + "description": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack." }, "volumeID": { - "description": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "string" } }, @@ -8624,7 +8648,7 @@ "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, @@ -8635,7 +8659,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", + "description": "optional specify whether the ConfigMap or its keys must be defined", "type": "boolean" } }, @@ -8645,11 +8669,11 @@ "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, @@ -8660,7 +8684,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", + "description": "optional specify whether the ConfigMap or its keys must be defined", "type": "boolean" } }, @@ -8670,14 +8694,14 @@ "description": "A single application container that you want to run within a pod.", "properties": { "args": { - "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, "type": "array" }, "command": { - "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "items": { "type": "string" }, @@ -8700,16 +8724,11 @@ "type": "array" }, "image": { - "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", "type": "string" }, "imagePullPolicy": { - "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\nPossible enum values:\n - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.\n - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.\n - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present", - "enum": [ - "Always", - "IfNotPresent", - "Never" - ], + "description": "Image pull policy. One of Always, Never, IfNotPresent. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", "type": "string" }, "lifecycle": { @@ -8725,7 +8744,7 @@ "type": "string" }, "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "description": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" }, @@ -8742,6 +8761,14 @@ "$ref": "#/definitions/io.k8s.api.core.v1.Probe", "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", "description": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" @@ -8767,11 +8794,7 @@ "type": "string" }, "terminationMessagePolicy": { - "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\nPossible enum values:\n - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents.\n - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.", - "enum": [ - "FallbackToLogsOnError", - "File" - ], + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "type": "string" }, "tty": { @@ -8826,12 +8849,7 @@ "type": "string" }, "protocol": { - "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\nPossible enum values:\n - `\"SCTP\"` is the SCTP protocol.\n - `\"TCP\"` is the TCP protocol.\n - `\"UDP\"` is the UDP protocol.", - "enum": [ - "SCTP", - "TCP", - "UDP" - ], + "description": "Protocol for port. Must be UDP, TCP, or SCTP. 
Defaults to \"TCP\".", "type": "string" } }, @@ -8840,6 +8858,24 @@ ], "type": "object" }, + "io.k8s.api.core.v1.ContainerResizePolicy": { + "description": "ContainerResizePolicy represents resource resize policy for the container.", + "properties": { + "resourceName": { + "description": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "type": "string" + }, + "restartPolicy": { + "description": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, "io.k8s.api.core.v1.DownwardAPIProjection": { "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", "properties": { @@ -8899,12 +8935,12 @@ "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", "properties": { "medium": { - "description": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "description": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", "type": "string" }, "sizeLimit": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", - "description": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir" + "description": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" } }, "type": "object" @@ -9110,26 +9146,26 @@ "description": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "lun": { - "description": "Optional: FC target lun number", + "description": "lun is Optional: FC target lun number", "type": "integer" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "targetWWNs": { - "description": "Optional: FC target worldwide names (WWNs)", + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)", "items": { "type": "string" }, "type": "array" }, "wwids": { - "description": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + "description": "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", "items": { "type": "string" }, @@ -9142,27 +9178,27 @@ "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "properties": { "driver": { - "description": "Driver is the name of the driver to use for this volume.", + "description": "driver is the name of the driver to use for this volume.", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", "type": "string" }, "options": { "additionalProperties": { "type": "string" }, - "description": "Optional: Extra command options if any.", + "description": "options is Optional: this field holds extra command options if any.", "type": "object" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." + "description": "secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." } }, "required": [ @@ -9174,11 +9210,11 @@ "description": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", "properties": { "datasetName": { - "description": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated", + "description": "datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated", "type": "string" }, "datasetUUID": { - "description": "UUID of the dataset. This is unique identifier of a Flocker dataset", + "description": "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset", "type": "string" } }, @@ -9188,19 +9224,19 @@ "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. 
GCE PDs support ownership management and SELinux relabeling.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "string" }, "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "integer" }, "pdName": { - "description": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "boolean" } }, @@ -9229,15 +9265,15 @@ "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "properties": { "directory": { - "description": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "description": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", "type": "string" }, "repository": { - "description": "Repository URL", + "description": "repository is the URL", "type": "string" }, "revision": { - "description": "Commit hash for the specified revision.", + "description": "revision is the commit hash for the specified revision.", "type": "string" } }, @@ -9250,15 +9286,15 @@ "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. 
Glusterfs volumes do not support ownership management or SELinux relabeling.", "properties": { "endpoints": { - "description": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string" }, "path": { - "description": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "boolean" } }, @@ -9291,11 +9327,7 @@ "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." }, "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.\n\nPossible enum values:\n - `\"HTTP\"` means that the scheme used will be http://\n - `\"HTTPS\"` means that the scheme used will be https://", - "enum": [ - "HTTP", - "HTTPS" - ], + "description": "Scheme to use for connecting to the host. Defaults to HTTP.", "type": "string" } }, @@ -9308,7 +9340,7 @@ "description": "HTTPHeader describes a custom header to be used in HTTP probes", "properties": { "name": { - "description": "The header field name", + "description": "The header field name. 
This will be canonicalized upon output, so case-variant names will be understood as the same header.", "type": "string" }, "value": { @@ -9343,11 +9375,11 @@ "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", "properties": { "path": { - "description": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "type": "string" }, "type": { - "description": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "type": "string" } }, @@ -9360,50 +9392,50 @@ "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", "properties": { "chapAuthDiscovery": { - "description": "whether support iSCSI Discovery CHAP authentication", + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", "type": "boolean" }, "chapAuthSession": { - "description": "whether support iSCSI Session CHAP authentication", + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", "type": "boolean" }, "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", "type": "string" }, "initiatorName": { - "description": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", "type": "string" }, "iqn": { - "description": "Target iSCSI Qualified Name.", + "description": "iqn is the target iSCSI Qualified Name.", "type": "string" }, "iscsiInterface": { - "description": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", "type": "string" }, "lun": { - "description": "iSCSI Target Lun number.", + "description": "lun represents iSCSI Target Lun number.", "type": "integer" }, "portals": { - "description": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "description": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "items": { "type": "string" }, "type": "array" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false.", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "CHAP Secret for iSCSI target and initiator authentication" + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication" }, "targetPortal": { - "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "type": "string" } }, @@ -9418,15 +9450,15 @@ "description": "Maps a string key to a path within a volume.", "properties": { "key": { - "description": "The key to project.", + "description": "key is the key to project.", "type": "string" }, "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. 
May not contain the path element '..'. May not start with the string '..'.", + "description": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", "type": "string" } }, @@ -9483,15 +9515,15 @@ "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", "properties": { "path": { - "description": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "boolean" }, "server": { - "description": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string" } }, @@ -9543,15 +9575,7 @@ "type": "string" }, "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\n\nPossible enum values:\n - `\"DoesNotExist\"`\n - `\"Exists\"`\n - `\"Gt\"`\n - `\"In\"`\n - `\"Lt\"`\n - `\"NotIn\"`", - "enum": [ - "DoesNotExist", - "Exists", - "Gt", - "In", - "Lt", - "NotIn" - ], + "description": "Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", "type": "string" }, "values": { @@ -9659,11 +9683,11 @@ }, "spec": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec", - "description": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + "description": "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" }, "status": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimStatus", - "description": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + "description": "status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" } }, "type": "object", @@ -9676,33 +9700,28 @@ ] }, "io.k8s.api.core.v1.PersistentVolumeClaimCondition": { - "description": "PersistentVolumeClaimCondition contails details about state of pvc", + "description": "PersistentVolumeClaimCondition contains details about state of pvc", "properties": { "lastProbeTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Last time we probed the condition." + "description": "lastProbeTime is the time we probed the condition." }, "lastTransitionTime": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Last time the condition transitioned from one status to another." + "description": "lastTransitionTime is the time the condition transitioned from one status to another." 
}, "message": { - "description": "Human-readable message indicating details about last transition.", + "description": "message is the human-readable message indicating details about last transition.", "type": "string" }, "reason": { - "description": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", "type": "string" }, "status": { "type": "string" }, "type": { - "description": "\n\n\nPossible enum values:\n - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node\n - `\"Resizing\"` - a user trigger resize of pvc has been started", - "enum": [ - "FileSystemResizePending", - "Resizing" - ], "type": "string" } }, @@ -9716,7 +9735,7 @@ "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", "properties": { "accessModes": { - "description": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "description": "accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "items": { "type": "string" }, @@ -9724,22 +9743,22 @@ }, "dataSource": { "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference", - "description": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field." + "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource." }, "dataSourceRef": { - "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference", - "description": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled." + "$ref": "#/definitions/io.k8s.api.core.v1.TypedObjectReference", + "description": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", - "description": "Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + "description": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" }, "selector": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", - "description": "A label query over volumes to consider for binding." + "description": "selector is a label query over volumes to consider for binding." }, "storageClassName": { - "description": "Name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "description": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "type": "string" }, "volumeMode": { @@ -9747,7 +9766,7 @@ "type": "string" }, "volumeName": { - "description": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.", "type": "string" } }, @@ -9757,7 +9776,7 @@ "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", "properties": { "accessModes": { - "description": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "description": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "items": { "type": "string" }, @@ -9767,18 +9786,18 @@ "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" }, - "description": "The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. 
This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "description": "allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "object" }, "capacity": { "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" }, - "description": "Represents the actual resources of the underlying volume.", + "description": "capacity represents the actual resources of the underlying volume.", "type": "object" }, "conditions": { - "description": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" }, @@ -9787,16 +9806,11 @@ "x-kubernetes-patch-strategy": "merge" }, "phase": { - "description": "Phase represents the current phase of PersistentVolumeClaim.\n\nPossible enum values:\n - `\"Bound\"` used for PersistentVolumeClaims that are bound\n - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. 
The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost.\n - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound", - "enum": [ - "Bound", - "Lost", - "Pending" - ], + "description": "phase represents the current phase of PersistentVolumeClaim.", "type": "string" }, "resizeStatus": { - "description": "ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "description": "resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "string" } }, @@ -9823,11 +9837,11 @@ "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", "properties": { "claimName": { - "description": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "type": "string" }, "readOnly": { - "description": "Will force the ReadOnly setting in VolumeMounts. Default false.", + "description": "readOnly Will force the ReadOnly setting in VolumeMounts. 
Default false.", "type": "boolean" } }, @@ -9840,11 +9854,11 @@ "description": "Represents a Photon Controller persistent disk resource.", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "pdID": { - "description": "ID that identifies Photon Controller persistent disk", + "description": "pdID is the ID that identifies Photon Controller persistent disk", "type": "string" } }, @@ -9882,10 +9896,10 @@ }, "namespaceSelector": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", - "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled." + "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." }, "namespaces": { - "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"", + "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "items": { "type": "string" }, @@ -9993,7 +10007,7 @@ "description": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." }, "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", "items": { "format": "int64", "type": "integer" @@ -10018,15 +10032,15 @@ "description": "PortworxVolumeSource represents a Portworx volume resource.", "properties": { "fsType": { - "description": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "volumeID": { - "description": "VolumeID uniquely identifies a Portworx volume", + "description": "volumeID uniquely identifies a Portworx volume", "type": "string" } }, @@ -10066,7 +10080,7 @@ }, "grpc": { "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction", - "description": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate." + "description": "GRPC specifies an action involving a GRPC port." }, "httpGet": { "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction", @@ -10103,11 +10117,11 @@ "description": "Represents a projected volume source", "properties": { "defaultMode": { - "description": "Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "sources": { - "description": "list of volume projections", + "description": "sources is the list of volume projections", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" }, @@ -10120,27 +10134,27 @@ "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", "properties": { "group": { - "description": "Group to map volume access to Default is no group", + "description": "group to map volume access to Default is no group", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "type": "boolean" }, "registry": { - "description": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "description": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", "type": "string" }, "tenant": { - "description": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + "description": "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", "type": "string" }, "user": { - "description": "User to map volume access to Defaults to serivceaccount user", + "description": "user to map volume access to Defaults to serivceaccount user", "type": "string" }, 
"volume": { - "description": "Volume is a string that references an already created Quobyte volume by name.", + "description": "volume is a string that references an already created Quobyte volume by name.", "type": "string" } }, @@ -10154,38 +10168,38 @@ "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", "type": "string" }, "image": { - "description": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "keyring": { - "description": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "monitors": { - "description": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "items": { "type": "string" }, "type": "array" }, "pool": { - "description": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" }, "user": { - "description": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" } }, @@ -10195,6 +10209,19 @@ ], "type": "object" }, + "io.k8s.api.core.v1.ResourceClaim": { + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "properties": { + "name": { + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. 
It makes that resource available inside a container.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, "io.k8s.api.core.v1.ResourceFieldSelector": { "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", "properties": { @@ -10220,6 +10247,17 @@ "io.k8s.api.core.v1.ResourceRequirements": { "description": "ResourceRequirements describes the compute resource requirements.", "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceClaim" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" @@ -10231,7 +10269,7 @@ "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" }, - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object" } }, @@ -10263,43 +10301,43 @@ "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", "type": "string" }, "gateway": { - "description": "The host address of the ScaleIO API Gateway.", + "description": "gateway is the host address of the ScaleIO API Gateway.", "type": "string" }, "protectionDomain": { - "description": "The name of the ScaleIO Protection Domain for the configured storage.", + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail." + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail." 
}, "sslEnabled": { - "description": "Flag to enable/disable SSL communication with Gateway, default false", + "description": "sslEnabled Flag enable/disable SSL communication with Gateway, default false", "type": "boolean" }, "storageMode": { - "description": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", "type": "string" }, "storagePool": { - "description": "The ScaleIO Storage Pool associated with the protection domain.", + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", "type": "string" }, "system": { - "description": "The name of the storage system as configured in ScaleIO.", + "description": "system is the name of the storage system as configured in ScaleIO.", "type": "string" }, "volumeName": { - "description": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", "type": "string" } }, @@ -10318,12 +10356,7 @@ "type": "string" }, "type": { - "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\nPossible enum values:\n - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to \u003ckubelet-root-dir\u003e/seccomp.\n - `\"RuntimeDefault\"` represents the default container runtime seccomp profile.\n - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. 
unconfined).", - "enum": [ - "Localhost", - "RuntimeDefault", - "Unconfined" - ], + "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", "type": "string" } }, @@ -10380,7 +10413,7 @@ "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, @@ -10391,7 +10424,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the Secret or its key must be defined", + "description": "optional field specify whether the Secret or its key must be defined", "type": "boolean" } }, @@ -10401,22 +10434,22 @@ "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" }, "type": "array" }, "optional": { - "description": "Specify whether the Secret or its keys must be defined", + "description": "optional field specify whether the Secret or its keys must be defined", "type": "boolean" }, "secretName": { - "description": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "description": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "type": "string" } }, @@ -10476,15 +10509,15 @@ "description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", "properties": { "audience": { - "description": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "description": "audience is the intended audience of the token. 
A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", "type": "string" }, "expirationSeconds": { - "description": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "description": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", "type": "integer" }, "path": { - "description": "Path is the path relative to the mount point of the file to project the token into.", + "description": "path is the path relative to the mount point of the file to project the token into.", "type": "string" } }, @@ -10497,7 +10530,7 @@ "description": "ServicePort contains information on service's port.", "properties": { "appProtocol": { - "description": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This field follows standard Kubernetes label syntax. 
Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -10513,12 +10546,7 @@ "type": "integer" }, "protocol": { - "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.\n\nPossible enum values:\n - `\"SCTP\"` is the SCTP protocol.\n - `\"TCP\"` is the TCP protocol.\n - `\"UDP\"` is the UDP protocol.", - "enum": [ - "SCTP", - "TCP", - "UDP" - ], + "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", "type": "string" }, "targetPort": { @@ -10535,23 +10563,23 @@ "description": "Represents a StorageOS persistent volume resource.", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", - "description": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." 
}, "volumeName": { - "description": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", "type": "string" }, "volumeNamespace": { - "description": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", "type": "string" } }, @@ -10596,12 +10624,7 @@ "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", "properties": { "effect": { - "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\nPossible enum values:\n - `\"NoExecute\"` Evict any already-running pods that do not tolerate the taint. 
Currently enforced by NodeController.\n - `\"NoSchedule\"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler.\n - `\"PreferNoSchedule\"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler.", - "enum": [ - "NoExecute", - "NoSchedule", - "PreferNoSchedule" - ], + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", "type": "string" }, "key": { @@ -10609,11 +10632,7 @@ "type": "string" }, "operator": { - "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\nPossible enum values:\n - `\"Equal\"`\n - `\"Exists\"`", - "enum": [ - "Equal", - "Exists" - ], + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", "type": "string" }, "tolerationSeconds": { @@ -10650,128 +10669,153 @@ "type": "object", "x-kubernetes-map-type": "atomic" }, + "io.k8s.api.core.v1.TypedObjectReference": { + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, "io.k8s.api.core.v1.Volume": { "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", "properties": { "awsElasticBlockStore": { "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", - "description": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" }, "azureDisk": { "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource", - "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." }, "azureFile": { "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource", - "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod." 
+ "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod." }, "cephfs": { "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource", - "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" }, "cinder": { "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource", - "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" }, "configMap": { "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource", - "description": "ConfigMap represents a configMap that should populate this volume" + "description": "configMap represents a configMap that should populate this volume" }, "csi": { "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource", - "description": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." }, "downwardAPI": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource", - "description": "DownwardAPI represents downward API about the pod that should populate this volume" + "description": "downwardAPI represents downward API about the pod that should populate this volume" }, "emptyDir": { "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource", - "description": "EmptyDir represents a temporary directory that shares a pod's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + "description": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" }, "ephemeral": { "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource", - "description": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time." + "description": "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time." }, "fc": { "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource", - "description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." }, "flexVolume": { "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource", - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." }, "flocker": { "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource", - "description": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running" }, "gcePersistentDisk": { "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource", - "description": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" }, "gitRepo": { "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." + "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "glusterfs": { "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource", - "description": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md" + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md" }, "hostPath": { "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource", - "description": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. 
Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" }, "iscsi": { "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource", - "description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" }, "name": { - "description": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "nfs": { "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource", - "description": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + "description": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" }, "persistentVolumeClaim": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource", - "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + "description": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" }, "photonPersistentDisk": { "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource", - "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" }, "portworxVolume": { "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource", - "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine" + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine" }, "projected": { "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource", - "description": "Items for all in one resources secrets, configmaps, and downward API" + "description": "projected items for all in one resources secrets, configmaps, and downward API" }, "quobyte": { "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource", - "description": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime" + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime" }, "rbd": { "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource", - "description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md" + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md" }, "scaleIO": { "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource", - "description": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." }, "secret": { "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource", - "description": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + "description": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" }, "storageos": { "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource", - "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." 
}, "vsphereVolume": { "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource", - "description": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" } }, "required": [ @@ -10836,19 +10880,19 @@ "properties": { "configMap": { "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapProjection", - "description": "information about the configMap data to project" + "description": "configMap information about the configMap data to project" }, "downwardAPI": { "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIProjection", - "description": "information about the downwardAPI data to project" + "description": "downwardAPI information about the downwardAPI data to project" }, "secret": { "$ref": "#/definitions/io.k8s.api.core.v1.SecretProjection", - "description": "information about the secret data to project" + "description": "secret information about the secret data to project" }, "serviceAccountToken": { "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccountTokenProjection", - "description": "information about the serviceAccountToken data to project" + "description": "serviceAccountToken is information about the serviceAccountToken data to project" } }, "type": "object" @@ -10857,19 +10901,19 @@ "description": "Represents a vSphere volume resource.", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "storagePolicyID": { - "description": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", "type": "string" }, "storagePolicyName": { - "description": "Storage Policy Based Management (SPBM) profile name.", + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", "type": "string" }, "volumePath": { - "description": "Path that identifies vSphere volume vmdk", + "description": "volumePath is the path that identifies vSphere volume vmdk", "type": "string" } }, @@ -10933,12 +10977,16 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", "description": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "x-kubernetes-patch-strategy": "replace" + }, + "unhealthyPodEvictionPolicy": { + "description": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). 
Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "type": "string" } }, "type": "object" }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n\t(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "type": "string" }, "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": { @@ -10956,7 +11004,7 @@ "type": "string" }, "fieldValidation": { - "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields, provided that the `ServerSideFieldValidation`\nfeature gate is also enabled. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. 
This is the default behavior\nprior to v1.23 and is the default behavior when the\n`ServerSideFieldValidation` feature gate is disabled.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default when the\n`ServerSideFieldValidation` feature gate is enabled.\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional", + "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. This is the default behavior\nprior to v1.23.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default in v1.23+\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional", "type": "string" } }, @@ -11055,7 +11103,7 @@ "type": "string" }, "selfLink": { - "description": "selfLink is a URL representing this object. Populated by the system. 
Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", "type": "string" } }, @@ -11090,7 +11138,7 @@ }, "time": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", - "description": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'" + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over." } }, "type": "object" @@ -11107,13 +11155,9 @@ "additionalProperties": { "type": "string" }, - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "type": "object" }, - "clusterName": { - "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. 
This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "type": "string" - }, "creationTimestamp": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" @@ -11135,7 +11179,7 @@ "x-kubernetes-patch-strategy": "merge" }, "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "type": "string" }, "generation": { @@ -11146,7 +11190,7 @@ "additionalProperties": { "type": "string" }, - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", "type": "object" }, "managedFields": { @@ -11157,11 +11201,11 @@ "type": "array" }, "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "type": "string" }, "namespace": { - "description": "Namespace defines the space within which each name must be unique. 
An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "type": "string" }, "ownerReferences": { @@ -11178,11 +11222,11 @@ "type": "string" }, "selfLink": { - "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", "type": "string" }, "uid": { - "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "type": "string" } }, @@ -11196,7 +11240,7 @@ "type": "string" }, "blockOwnerDeletion": { - "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", "type": "boolean" }, "controller": { @@ -11208,11 +11252,11 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "type": "string" }, "uid": { - "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "description": "UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "type": "string" } }, diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f2dcef958b36..5d073be1f4ba 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -80,6 +80,12 @@ "name": "listOptions.continue", "in": "query" }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" + }, { "type": "string", "name": "namePrefix", @@ -199,6 +205,12 @@ "name": "listOptions.continue", "in": "query" }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" + }, { "type": "string", "name": "namespace", @@ -432,6 +444,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. 
Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -722,6 +740,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -1130,6 +1154,12 @@ "description": "The continue option should be set when retrieving more results from the server. 
Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -1474,6 +1504,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -1755,6 +1791,12 @@ "description": "The continue option should be set when retrieving more results from the server. 
Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -1978,6 +2020,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -2073,6 +2121,12 @@ "description": "The continue option should be set when retrieving more results from the server. 
Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -2366,6 +2420,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -2453,6 +2513,12 @@ "name": "listOptions.continue", "in": "query" }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects 
in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" + }, { "type": "string", "name": "fields", @@ -2557,6 +2623,12 @@ "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. 
If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "name": "listOptions.continue", "in": "query" + }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" } ], "responses": { @@ -2878,6 +2950,12 @@ "name": "listOptions.continue", "in": "query" }, + { + "type": "boolean", + "description": "`sendInitialEvents=true` may be set together with `watch=true`.\nIn that case, the watch stream will begin with synthetic events to\nproduce the current state of objects in the collection. Once all such\nevents have been sent, a synthetic \"Bookmark\" event will be sent.\nThe bookmark will report the ResourceVersion (RV) corresponding to the\nset of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation.\nAfterwards, the watch stream will proceed as usual, sending watch events\ncorresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch`\noption to also be set. 
The semantic of the watch request is as following:\n- `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward\ncompatibility reasons) and to false otherwise.\n+optional", + "name": "listOptions.sendInitialEvents", + "in": "query" + }, { "type": "string", "description": "Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".", @@ -8662,6 +8740,14 @@ "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, "resources": { "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" @@ -10670,6 +10756,14 @@ "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, "resources": { "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" @@ -11279,6 +11373,14 @@ "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, "resources": { "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" @@ -12293,19 +12395,19 @@ ], "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "string" }, "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", "type": "integer" }, "readOnly": { - "description": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "boolean" }, "volumeID": { - "description": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "type": "string" } } @@ -12337,27 +12439,27 @@ ], "properties": { "cachingMode": { - "description": "Host Caching mode: None, Read Only, Read Write.", + "description": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", "type": "string" }, "diskName": { - "description": "The Name of the data disk in the blob storage", + "description": "diskName is the Name of the data disk in the blob storage", "type": "string" }, "diskURI": { - "description": "The URI the data disk in the blob storage", + "description": "diskURI is the URI of data disk in the blob storage", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "kind": { - "description": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "description": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" } } @@ -12371,15 +12473,15 @@ ], "properties": { "readOnly": { - "description": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretName": { - "description": "the name of secret that contains Azure Storage Account Name and Key", + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", "type": "string" }, "shareName": { - "description": "Share Name", + "description": "shareName is the azure share Name", "type": "string" } } @@ -12392,23 +12494,23 @@ ], "properties": { "driver": { - "description": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "description": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "description": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", "type": "string" }, "nodePublishSecretRef": { - "description": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. 
This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "readOnly": { - "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + "description": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", "type": "boolean" }, "volumeAttributes": { - "description": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", "type": "object", "additionalProperties": { "type": "string" @@ -12444,30 +12546,30 @@ ], "properties": { "monitors": { - "description": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "array", "items": { "type": "string" } }, "path": { - "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", "type": "string" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "boolean" }, "secretFile": { - "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "string" }, "secretRef": { - "description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "user": { - "description": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "description": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", "type": "string" } } @@ -12480,19 +12582,19 @@ ], "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "string" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "boolean" }, "secretRef": { - "description": "Optional: points to a secret object containing parameters used to connect to OpenStack.", + "description": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "volumeID": { - "description": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "type": "string" } } @@ -12538,7 +12640,7 @@ "type": "object", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" @@ -12549,7 +12651,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", + "description": "optional specify whether the ConfigMap or its keys must be defined", "type": "boolean" } } @@ -12559,11 +12661,11 @@ "type": "object", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" @@ -12574,7 +12676,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", + "description": "optional specify whether the ConfigMap or its keys must be defined", "type": "boolean" } } @@ -12587,14 +12689,14 @@ ], "properties": { "args": { - "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "type": "array", "items": { "type": "string" } }, "command": { - "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "type": "array", "items": { "type": "string" @@ -12617,17 +12719,12 @@ } }, "image": { - "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", "type": "string" }, "imagePullPolicy": { - "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n\nPossible enum values:\n - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.\n - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.\n - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present", - "type": "string", - "enum": [ - "Always", - "IfNotPresent", - "Never" - ] + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" }, "lifecycle": { "description": "Actions that the management system should take in response to container lifecycle events. 
Cannot be updated.", @@ -12642,7 +12739,7 @@ "type": "string" }, "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "description": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" @@ -12659,6 +12756,14 @@ "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/io.k8s.api.core.v1.Probe" }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, "resources": { "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" @@ -12684,12 +12789,8 @@ "type": "string" }, "terminationMessagePolicy": { - "description": "Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.\n\nPossible enum values:\n - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents.\n - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.", - "type": "string", - "enum": [ - "FallbackToLogsOnError", - "File" - ] + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" }, "tty": { "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -12743,13 +12844,26 @@ "type": "string" }, "protocol": { - "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".\n\nPossible enum values:\n - `\"SCTP\"` is the SCTP protocol.\n - `\"TCP\"` is the TCP protocol.\n - `\"UDP\"` is the UDP protocol.", - "type": "string", - "enum": [ - "SCTP", - "TCP", - "UDP" - ] + "description": "Protocol for port. Must be UDP, TCP, or SCTP. 
Defaults to \"TCP\".", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ContainerResizePolicy": { + "description": "ContainerResizePolicy represents resource resize policy for the container.", + "type": "object", + "required": [ + "resourceName", + "restartPolicy" + ], + "properties": { + "resourceName": { + "description": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "type": "string" + }, + "restartPolicy": { + "description": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", + "type": "string" } } }, @@ -12813,11 +12927,11 @@ "type": "object", "properties": { "medium": { - "description": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "description": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", "type": "string" }, "sizeLimit": { - "description": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "description": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" } } @@ -13024,26 +13138,26 @@ "type": "object", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "lun": { - "description": "Optional: FC target lun number", + "description": "lun is Optional: FC target lun number", "type": "integer" }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "targetWWNs": { - "description": "Optional: FC target worldwide names (WWNs)", + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)", "type": "array", "items": { "type": "string" } }, "wwids": { - "description": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + "description": "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", "type": "array", "items": { "type": "string" @@ -13059,26 +13173,26 @@ ], "properties": { "driver": { - "description": "Driver is the name of the driver to use for this volume.", + "description": "driver is the name of the driver to use for this volume.", "type": "string" }, "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", "type": "string" }, "options": { - "description": "Optional: Extra command options if any.", + "description": "options is Optional: this field holds extra command options if any.", "type": "object", "additionalProperties": { "type": "string" } }, "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { - "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "description": "secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" } } @@ -13088,11 +13202,11 @@ "type": "object", "properties": { "datasetName": { - "description": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated", + "description": "datasetName is Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated", "type": "string" }, "datasetUUID": { - "description": "UUID of the dataset. This is unique identifier of a Flocker dataset", + "description": "datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset", "type": "string" } } @@ -13105,19 +13219,19 @@ ], "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "string" }, "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "integer" }, "pdName": { - "description": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "type": "boolean" } } @@ -13146,15 +13260,15 @@ ], "properties": { "directory": { - "description": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. 
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "description": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", "type": "string" }, "repository": { - "description": "Repository URL", + "description": "repository is the URL", "type": "string" }, "revision": { - "description": "Commit hash for the specified revision.", + "description": "revision is the commit hash for the specified revision.", "type": "string" } } @@ -13168,15 +13282,15 @@ ], "properties": { "endpoints": { - "description": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string" }, "path": { - "description": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", "type": "boolean" } } @@ -13208,12 +13322,8 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" }, "scheme": { - "description": "Scheme to use for connecting to the host. 
Defaults to HTTP.\n\nPossible enum values:\n - `\"HTTP\"` means that the scheme used will be http://\n - `\"HTTPS\"` means that the scheme used will be https://", - "type": "string", - "enum": [ - "HTTP", - "HTTPS" - ] + "description": "Scheme to use for connecting to the host. Defaults to HTTP.", + "type": "string" } } }, @@ -13226,7 +13336,7 @@ ], "properties": { "name": { - "description": "The header field name", + "description": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "type": "string" }, "value": { @@ -13260,11 +13370,11 @@ ], "properties": { "path": { - "description": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "type": "string" }, "type": { - "description": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "type": "string" } } @@ -13279,50 +13389,50 @@ ], "properties": { "chapAuthDiscovery": { - "description": "whether support iSCSI Discovery CHAP authentication", + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", "type": "boolean" }, "chapAuthSession": { - "description": "whether support iSCSI Session CHAP authentication", + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", "type": "boolean" }, "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", "type": "string" }, "initiatorName": { - "description": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", "type": "string" }, "iqn": { - "description": "Target iSCSI Qualified Name.", + "description": "iqn is the target iSCSI Qualified Name.", "type": "string" }, "iscsiInterface": { - "description": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", "type": "string" }, "lun": { - "description": "iSCSI Target Lun number.", + "description": "lun represents iSCSI Target Lun number.", "type": "integer" }, "portals": { - "description": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "description": "portals is the iSCSI Target Portal List. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "type": "array", "items": { "type": "string" } }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", "type": "boolean" }, "secretRef": { - "description": "CHAP Secret for iSCSI target and initiator authentication", + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "targetPortal": { - "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "type": "string" } } @@ -13336,15 +13446,15 @@ ], "properties": { "key": { - "description": "The key to project.", + "description": "key is the key to project.", "type": "string" }, "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "description": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", "type": "string" } } @@ -13401,15 +13511,15 @@ ], "properties": { "path": { - "description": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "boolean" }, "server": { - "description": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "type": "string" } } @@ -13461,16 +13571,8 @@ "type": "string" }, "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. 
Gt, and Lt.\n\nPossible enum values:\n - `\"DoesNotExist\"`\n - `\"Exists\"`\n - `\"Gt\"`\n - `\"In\"`\n - `\"Lt\"`\n - `\"NotIn\"`", - "type": "string", - "enum": [ - "DoesNotExist", - "Exists", - "Gt", - "In", - "Lt", - "NotIn" - ] + "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "type": "string" }, "values": { "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", @@ -13572,11 +13674,11 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, "spec": { - "description": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec" }, "status": { - "description": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "status represents the current information/status of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimStatus" } }, @@ -13589,7 +13691,7 @@ ] }, "io.k8s.api.core.v1.PersistentVolumeClaimCondition": { - "description": "PersistentVolumeClaimCondition contails details about state of pvc", + "description": "PersistentVolumeClaimCondition contains details about state of pvc", "type": "object", "required": [ "type", @@ -13597,31 +13699,26 @@ ], "properties": { "lastProbeTime": { - "description": "Last time we probed the condition.", + "description": "lastProbeTime is the time we probed the condition.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "lastTransitionTime": { - "description": "Last time the condition transitioned from one status to another.", + "description": "lastTransitionTime is the time the condition transitioned from one status to another.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, "message": { - "description": "Human-readable message indicating details about last transition.", + "description": "message is the human-readable message indicating details about last transition.", "type": "string" }, "reason": { - "description": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", "type": "string" }, "status": { "type": "string" }, "type": { - "description": "\n\n\nPossible enum values:\n - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node\n - `\"Resizing\"` - a user trigger resize of pvc has been started", - "type": "string", - "enum": [ - "FileSystemResizePending", - "Resizing" - ] + "type": "string" } } }, @@ -13630,30 +13727,30 @@ "type": "object", "properties": { "accessModes": { - "description": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "description": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "type": "array", "items": { "type": "string" } }, "dataSource": { - "description": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.", + "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference" }, "dataSourceRef": { - "description": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.", - "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference" + "description": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. 
When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "$ref": "#/definitions/io.k8s.api.core.v1.TypedObjectReference" }, "resources": { - "description": "Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "description": "resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" }, "selector": { - "description": "A label query over volumes to consider for binding.", + "description": "selector is a label query over volumes to consider for binding.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "storageClassName": { - "description": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "description": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", "type": "string" }, "volumeMode": { @@ -13661,7 +13758,7 @@ "type": "string" }, "volumeName": { - "description": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.", "type": "string" } } @@ -13671,28 +13768,28 @@ "type": "object", "properties": { "accessModes": { - "description": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "description": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", "type": "array", "items": { "type": "string" } }, "allocatedResources": { - "description": "The storage resource within AllocatedResources tracks the capacity allocated to a PVC. 
It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "description": "allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "object", "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" } }, "capacity": { - "description": "Represents the actual resources of the underlying volume.", + "description": "capacity represents the actual resources of the underlying volume.", "type": "object", "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" } }, "conditions": { - "description": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "description": "conditions is the current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" @@ -13701,16 +13798,11 @@ "x-kubernetes-patch-strategy": "merge" }, "phase": { - "description": "Phase represents the current phase of PersistentVolumeClaim.\n\nPossible enum values:\n - `\"Bound\"` used for PersistentVolumeClaims that are bound\n - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost.\n - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound", - "type": "string", - "enum": [ - "Bound", - "Lost", - "Pending" - ] + "description": "phase represents the current phase of PersistentVolumeClaim.", + "type": "string" }, "resizeStatus": { - "description": "ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "description": "resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "type": "string" } } @@ -13740,11 +13832,11 @@ ], "properties": { "claimName": { - "description": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "type": "string" }, "readOnly": { - "description": "Will force the ReadOnly setting in VolumeMounts. Default false.", + "description": "readOnly Will force the ReadOnly setting in VolumeMounts. Default false.", "type": "boolean" } } @@ -13757,11 +13849,11 @@ ], "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "pdID": { - "description": "ID that identifies Photon Controller persistent disk", + "description": "pdID is the ID that identifies Photon Controller persistent disk", "type": "string" } } @@ -13798,11 +13890,11 @@ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "namespaceSelector": { - "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.", + "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". 
An empty selector ({}) matches all namespaces.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, "namespaces": { - "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"", + "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "type": "array", "items": { "type": "string" @@ -13907,7 +13999,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile" }, "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", "type": "array", "items": { "type": "integer", @@ -13935,15 +14027,15 @@ ], "properties": { "fsType": { - "description": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "volumeID": { - "description": "VolumeID uniquely identifies a Portworx volume", + "description": "volumeID uniquely identifies a Portworx volume", "type": "string" } } @@ -13979,7 +14071,7 @@ "type": "integer" }, "grpc": { - "description": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.", + "description": "GRPC specifies an action involving a GRPC port.", "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction" }, "httpGet": { @@ -14017,11 +14109,11 @@ "type": "object", "properties": { "defaultMode": { - "description": "Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "sources": { - "description": "list of volume projections", + "description": "sources is the list of volume projections", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" @@ -14038,27 +14130,27 @@ ], "properties": { "group": { - "description": "Group to map volume access to Default is no group", + "description": "group to map volume access to Default is no group", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "type": "boolean" }, "registry": { - "description": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "description": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", "type": "string" }, "tenant": { - "description": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + "description": "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", "type": "string" }, "user": { - "description": "User to map volume access to Defaults to serivceaccount user", + "description": "user to map volume access to Defaults to serivceaccount user", "type": "string" }, "volume": { - "description": "Volume is a string that references an already created Quobyte volume by name.", + "description": "volume 
is a string that references an already created Quobyte volume by name.", "type": "string" } } @@ -14072,38 +14164,51 @@ ], "properties": { "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", "type": "string" }, "image": { - "description": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "keyring": { - "description": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "monitors": { - "description": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "array", "items": { "type": "string" } }, "pool": { - "description": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "pool is the rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "string" }, "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "type": "boolean" }, "secretRef": { - "description": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "user": { - "description": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ResourceClaim": { + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. 
It makes that resource available inside a container.", "type": "string" } } @@ -14134,6 +14239,17 @@ "description": "ResourceRequirements describes the compute resource requirements.", "type": "object", "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceClaim" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, "limits": { "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object", @@ -14142,7 +14258,7 @@ } }, "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "type": "object", "additionalProperties": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" @@ -14182,43 +14298,43 @@ ], "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Default is \"xfs\".", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", "type": "string" }, "gateway": { - "description": "The host address of the ScaleIO API Gateway.", + "description": "gateway is the host address of the ScaleIO API Gateway.", "type": "string" }, "protectionDomain": { - "description": "The name of the ScaleIO Protection Domain for the configured storage.", + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { - "description": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "sslEnabled": { - "description": "Flag to enable/disable SSL communication with Gateway, default false", + "description": "sslEnabled Flag enable/disable SSL communication with Gateway, default false", "type": "boolean" }, "storageMode": { - "description": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", "type": "string" }, "storagePool": { - "description": "The ScaleIO Storage Pool associated with the protection domain.", + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", "type": "string" }, "system": { - "description": "The name of the storage system as configured in ScaleIO.", + "description": "system is the name of the storage system as configured in ScaleIO.", "type": "string" }, "volumeName": { - "description": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", "type": "string" } } @@ -14235,13 +14351,8 @@ "type": "string" }, "type": { - "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.\n\nPossible enum values:\n - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to \u003ckubelet-root-dir\u003e/seccomp.\n - `\"RuntimeDefault\"` represents the default container runtime seccomp profile.\n - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. unconfined).", - "type": "string", - "enum": [ - "Localhost", - "RuntimeDefault", - "Unconfined" - ] + "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. 
Unconfined - no profile should be applied.", + "type": "string" } }, "x-kubernetes-unions": [ @@ -14294,7 +14405,7 @@ "type": "object", "properties": { "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" @@ -14305,7 +14416,7 @@ "type": "string" }, "optional": { - "description": "Specify whether the Secret or its key must be defined", + "description": "optional field specify whether the Secret or its key must be defined", "type": "boolean" } } @@ -14315,22 +14426,22 @@ "type": "object", "properties": { "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", "type": "integer" }, "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "description": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" } }, "optional": { - "description": "Specify whether the Secret or its keys must be defined", + "description": "optional field specify whether the Secret or its keys must be defined", "type": "boolean" }, "secretName": { - "description": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "description": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "type": "string" } } @@ -14393,15 +14504,15 @@ ], "properties": { "audience": { - "description": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "description": "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", "type": "string" }, "expirationSeconds": { - "description": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "description": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. 
The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", "type": "integer" }, "path": { - "description": "Path is the path relative to the mount point of the file to project the token into.", + "description": "path is the path relative to the mount point of the file to project the token into.", "type": "string" } } @@ -14414,7 +14525,7 @@ ], "properties": { "appProtocol": { - "description": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "description": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", "type": "string" }, "name": { @@ -14430,13 +14541,8 @@ "type": "integer" }, "protocol": { - "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.\n\nPossible enum values:\n - `\"SCTP\"` is the SCTP protocol.\n - `\"TCP\"` is the TCP protocol.\n - `\"UDP\"` is the UDP protocol.", - "type": "string", - "enum": [ - "SCTP", - "TCP", - "UDP" - ] + "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", + "type": "string" }, "targetPort": { "description": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. 
If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", @@ -14449,23 +14555,23 @@ "type": "object", "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "type": "boolean" }, "secretRef": { - "description": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" }, "volumeName": { - "description": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", "type": "string" }, "volumeNamespace": { - "description": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. 
This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", "type": "string" } } @@ -14510,25 +14616,16 @@ "type": "object", "properties": { "effect": { - "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n\nPossible enum values:\n - `\"NoExecute\"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController.\n - `\"NoSchedule\"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler.\n - `\"PreferNoSchedule\"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler.", - "type": "string", - "enum": [ - "NoExecute", - "NoSchedule", - "PreferNoSchedule" - ] + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. 
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" }, "key": { "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", "type": "string" }, "operator": { - "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.\n\nPossible enum values:\n - `\"Equal\"`\n - `\"Exists\"`", - "type": "string", - "enum": [ - "Equal", - "Exists" - ] + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" }, "tolerationSeconds": { "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", @@ -14563,6 +14660,31 @@ }, "x-kubernetes-map-type": "atomic" }, + "io.k8s.api.core.v1.TypedObjectReference": { + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "type": "string" + } + } + }, "io.k8s.api.core.v1.Volume": { "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", "type": "object", @@ -14571,123 +14693,123 @@ ], "properties": { "awsElasticBlockStore": { - "description": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" }, "azureDisk": { - "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" }, "azureFile": { - "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource" }, "cephfs": { - "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource" }, "cinder": { - "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource" }, "configMap": { - "description": "ConfigMap represents a configMap that should populate this volume", + "description": "configMap represents a configMap that should populate this volume", "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource" }, "csi": { - "description": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource" }, "downwardAPI": { - "description": "DownwardAPI represents downward API about the pod that should populate this volume", + "description": "downwardAPI represents downward API about the pod that should populate this volume", "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource" }, "emptyDir": { - "description": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "description": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource" }, "ephemeral": { - "description": "Ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "description": "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource" }, "fc": { - "description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" }, "flexVolume": { - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" }, "flocker": { - "description": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" }, "gcePersistentDisk": { - "description": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "gitRepo": { - "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource" }, "glusterfs": { - "description": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource" }, "hostPath": { - "description": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. 
Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource" }, "iscsi": { - "description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource" }, "name": { - "description": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "type": "string" }, "nfs": { - "description": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "description": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource" }, "persistentVolumeClaim": { - "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "description": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource" }, "photonPersistentDisk": { - "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" }, "portworxVolume": { - "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" }, "projected": { - "description": "Items for all in one resources secrets, configmaps, and downward API", + "description": "projected items for all in one resources secrets, configmaps, and downward API", "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource" }, "quobyte": { - "description": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" }, "rbd": { - "description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource" }, "scaleIO": { - "description": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource" }, "secret": { - "description": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "description": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource" }, "storageos": { - "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource" }, "vsphereVolume": { - "description": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" } } @@ -14749,19 +14871,19 @@ "type": "object", "properties": { "configMap": { - "description": "information about the configMap data to project", + "description": "configMap information about the configMap data to project", "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapProjection" }, "downwardAPI": { - "description": "information about the downwardAPI data to project", + "description": "downwardAPI information about the downwardAPI data to project", "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIProjection" }, "secret": { - "description": "information about the secret data to project", + 
"description": "secret information about the secret data to project", "$ref": "#/definitions/io.k8s.api.core.v1.SecretProjection" }, "serviceAccountToken": { - "description": "information about the serviceAccountToken data to project", + "description": "serviceAccountToken is information about the serviceAccountToken data to project", "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccountTokenProjection" } } @@ -14774,19 +14896,19 @@ ], "properties": { "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "description": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "type": "string" }, "storagePolicyID": { - "description": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", "type": "string" }, "storagePolicyName": { - "description": "Storage Policy Based Management (SPBM) profile name.", + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", "type": "string" }, "volumePath": { - "description": "Path that identifies vSphere volume vmdk", + "description": "volumePath is the path that identifies vSphere volume vmdk", "type": "string" } } @@ -14847,11 +14969,15 @@ "description": "Label query over pods whose evictions are managed by the disruption budget. 
A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "x-kubernetes-patch-strategy": "replace", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "unhealthyPodEvictionPolicy": { + "description": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "type": "string" } } }, "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. 
The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n\t(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "type": "string" }, "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions": { @@ -14871,7 +14997,7 @@ }, "fieldValidation": { "type": "string", - "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields, provided that the `ServerSideFieldValidation`\nfeature gate is also enabled. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. This is the default behavior\nprior to v1.23 and is the default behavior when the\n`ServerSideFieldValidation` feature gate is disabled.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default when the\n`ServerSideFieldValidation` feature gate is enabled.\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional" + "title": "fieldValidation instructs the server on how to handle\nobjects in the request (POST/PUT/PATCH) containing unknown\nor duplicate fields. Valid values are:\n- Ignore: This will ignore any unknown fields that are silently\ndropped from the object, and will ignore all but the last duplicate\nfield that the decoder encounters. 
This is the default behavior\nprior to v1.23.\n- Warn: This will send a warning via the standard warning response\nheader for each unknown field that is dropped from the object, and\nfor each duplicate field that is encountered. The request will\nstill succeed if there are no other errors, and will only persist\nthe last of any duplicate fields. This is the default in v1.23+\n- Strict: This will fail the request with a BadRequest error if\nany unknown fields would be dropped from the object, or if any\nduplicate fields are present. The error returned from the server\nwill contain all unknown and duplicate fields encountered.\n+optional" } } }, @@ -14969,7 +15095,7 @@ "type": "string" }, "selfLink": { - "description": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", "type": "string" } } @@ -15003,7 +15129,7 @@ "type": "string" }, "time": { - "description": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" } } @@ -15018,16 +15144,12 @@ "type": "object", "properties": { "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations", + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "type": "object", "additionalProperties": { "type": "string" } }, - "clusterName": { - "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "type": "string" - }, "creationTimestamp": { "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" @@ -15049,7 +15171,7 @@ "x-kubernetes-patch-strategy": "merge" }, "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "type": "string" }, "generation": { @@ -15057,7 +15179,7 @@ "type": "integer" }, "labels": { - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", "type": "object", "additionalProperties": { "type": "string" @@ -15071,11 +15193,11 @@ } }, "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "type": "string" }, "namespace": { - "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "type": "string" }, "ownerReferences": { @@ -15092,11 +15214,11 @@ "type": "string" }, "selfLink": { - "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", "type": "string" }, "uid": { - "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "type": "string" } } @@ -15116,7 +15238,7 @@ "type": "string" }, "blockOwnerDeletion": { - "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. 
To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", "type": "boolean" }, "controller": { @@ -15128,11 +15250,11 @@ "type": "string" }, "name": { - "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "type": "string" }, "uid": { - "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "type": "string" } }, diff --git a/docs/executor_swagger.md b/docs/executor_swagger.md index e159c9f26106..f405b099ed9c 100644 --- a/docs/executor_swagger.md +++ b/docs/executor_swagger.md @@ -840,6 +840,10 @@ Cannot be updated. +listMapKey=containerPort +listMapKey=protocol | | | readinessProbe | [Probe](#probe)| `Probe` | | | | | +| resizePolicy | [][ContainerResizePolicy](#container-resize-policy)| `[]*ContainerResizePolicy` | | | Resources resize policy for the container. ++featureGate=InPlacePodVerticalScaling ++optional ++listType=atomic | | | resources | [ResourceRequirements](#resource-requirements)| `ResourceRequirements` | | | | | | securityContext | [SecurityContext](#security-context)| `SecurityContext` | | | | | | startupProbe | [Probe](#probe)| `Probe` | | | | | @@ -951,6 +955,10 @@ Cannot be updated. +listMapKey=containerPort +listMapKey=protocol | | | readinessProbe | [Probe](#probe)| `Probe` | | | | | +| resizePolicy | [][ContainerResizePolicy](#container-resize-policy)| `[]*ContainerResizePolicy` | | | Resources resize policy for the container. 
++featureGate=InPlacePodVerticalScaling ++optional ++listType=atomic | | | resources | [ResourceRequirements](#resource-requirements)| `ResourceRequirements` | | | | | | securityContext | [SecurityContext](#security-context)| `SecurityContext` | | | | | | startupProbe | [Probe](#probe)| `Probe` | | | | | @@ -1023,6 +1031,22 @@ referred to by services. +### ContainerResizePolicy + + + + + + +**Properties** + +| Name | Type | Go type | Required | Default | Description | Example | +|------|------|---------|:--------:| ------- |-------------|---------| +| resourceName | [ResourceName](#resource-name)| `ResourceName` | | | | | +| restartPolicy | [ResourceResizeRestartPolicy](#resource-resize-restart-policy)| `ResourceResizeRestartPolicy` | | | | | + + + ### ContainerSetRetryStrategy @@ -2830,7 +2854,7 @@ otherwise 422 (Unprocessable Entity) will be returned. | kind | string| `string` | | | Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | name | string| `string` | | | Name of the referent. -More info: http://kubernetes.io/docs/user-guide/identifiers#names | | +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names | | | uid | [UID](#uid)| `UID` | | | | | @@ -2933,7 +2957,7 @@ PersistentVolumeClaim objects as part of an EphemeralVolumeSource. | annotations | map of string| `map[string]string` | | | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
-More info: http://kubernetes.io/docs/user-guide/annotations +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations +optional | | | creationTimestamp | [Time](#time)| `Time` | | | | | | deletionGracePeriodSeconds | int64 (formatted integer)| `int64` | | | Number of seconds allowed for this object to gracefully terminate before @@ -2976,7 +3000,7 @@ Populated by the system. Read-only. | labels | map of string| `map[string]string` | | | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. -More info: http://kubernetes.io/docs/user-guide/labels +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +optional | | | managedFields | [][ManagedFieldsEntry](#managed-fields-entry)| `[]*ManagedFieldsEntry` | | | ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal @@ -2992,7 +3016,7 @@ some resources may allow a client to request the generation of an appropriate na automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names +optional | | | namespace | string| `string` | | | Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. @@ -3001,7 +3025,7 @@ those objects will be empty. Must be a DNS_LABEL. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/namespaces +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces +optional | | | ownerReferences | [][OwnerReference](#owner-reference)| `[]*OwnerReference` | | | List of objects depended by this object. 
If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, @@ -3686,6 +3710,17 @@ inside a container. | | [ResourceList](#resource-list) +### ResourceName + + + + +| Name | Type | Go type | Default | Description | Example | +|------|------|---------| ------- |-------------|---------| +| ResourceName | string| string | | | | + + + ### ResourceRequirements @@ -3714,6 +3749,17 @@ This field is immutable. It can only be set for containers. +### ResourceResizeRestartPolicy + + + + +| Name | Type | Go type | Default | Description | Example | +|------|------|---------| ------- |-------------|---------| +| ResourceResizeRestartPolicy | string| string | | | | + + + ### ResourceTemplate @@ -3989,6 +4035,10 @@ Cannot be updated. +listMapKey=containerPort +listMapKey=protocol | | | readinessProbe | [Probe](#probe)| `Probe` | | | | | +| resizePolicy | [][ContainerResizePolicy](#container-resize-policy)| `[]*ContainerResizePolicy` | | | Resources resize policy for the container. ++featureGate=InPlacePodVerticalScaling ++optional ++listType=atomic | | | resources | [ResourceRequirements](#resource-requirements)| `ResourceRequirements` | | | | | | securityContext | [SecurityContext](#security-context)| `SecurityContext` | | | | | | source | string| `string` | | | Source contains the source code of the script to execute | | @@ -4838,6 +4888,10 @@ Cannot be updated. +listMapKey=containerPort +listMapKey=protocol | | | readinessProbe | [Probe](#probe)| `Probe` | | | | | +| resizePolicy | [][ContainerResizePolicy](#container-resize-policy)| `[]*ContainerResizePolicy` | | | Resources resize policy for the container. 
++featureGate=InPlacePodVerticalScaling ++optional ++listType=atomic | | | resources | [ResourceRequirements](#resource-requirements)| `ResourceRequirements` | | | | | | securityContext | [SecurityContext](#security-context)| `SecurityContext` | | | | | | startupProbe | [Probe](#probe)| `Probe` | | | | | diff --git a/docs/fields.md b/docs/fields.md index 2eda10c6e4e8..bbe08ed42fbd 100644 --- a/docs/fields.md +++ b/docs/fields.md @@ -2592,6 +2592,7 @@ UserContainer is a container specified by a user. |`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| |`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| |`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| |`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| |`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| |`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| @@ -2925,6 +2926,7 @@ ScriptTemplate is a template subtype to enable scripting through code steps |`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| |`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| |`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| |`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| |`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| |`source`|`string`|Source contains the source code of the script to execute| @@ -3734,6 +3736,7 @@ _No description available_ |`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| |`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| |`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| |`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| |`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| |`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| @@ -4887,22 +4890,21 @@ ObjectMeta is metadata that all persisted resources must have, which includes al ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`annotations`|`Map< string , string >`|Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations| -|`clusterName`|`string`|The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.| +|`annotations`|`Map< string , string >`|Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations| |`creationTimestamp`|[`Time`](#time)|CreationTimestamp is a timestamp representing the server time when this object was created. 
It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| |`deletionGracePeriodSeconds`|`integer`|Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.| |`deletionTimestamp`|[`Time`](#time)|DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| |`finalizers`|`Array< string >`|Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.| -|`generateName`|`string`|GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency| +|`generateName`|`string`|GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency| |`generation`|`integer`|A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.| -|`labels`|`Map< string , string >`|Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels| +|`labels`|`Map< string , string >`|Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels| |`managedFields`|`Array<`[`ManagedFieldsEntry`](#managedfieldsentry)`>`|ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". 
The set of fields is always in the version that the workflow used when modifying the object.| -|`name`|`string`|Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names| -|`namespace`|`string`|Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces| +|`name`|`string`|Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| +|`namespace`|`string`|Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces| |`ownerReferences`|`Array<`[`OwnerReference`](#ownerreference)`>`|List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.| |`resourceVersion`|`string`|An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency| -|~~`selfLink`~~|~~`string`~~|~~SelfLink is a URL representing this object. Populated by the system. Read-only.~~ DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.| +|`selfLink`|`string`|Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.| +|`uid`|`string`|UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| ## Affinity @@ -4973,6 +4975,7 @@ PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. 
|`maxUnavailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".| |`minAvailable`|[`IntOrString`](#intorstring)|An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".| |`selector`|[`LabelSelector`](#labelselector)|Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.| +|`unhealthyPodEvictionPolicy`|`string`|UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type="Ready",status="True". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase="Running"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase="Running"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. 
Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).| ## PodSecurityContext @@ -4994,7 +4997,7 @@ PodSecurityContext holds pod-level security attributes and common container sett |`runAsUser`|`integer`|The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| |`seLinuxOptions`|[`SELinuxOptions`](#selinuxoptions)|The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.| |`seccompProfile`|[`SeccompProfile`](#seccompprofile)|The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.| -|`supplementalGroups`|`Array< integer >`|A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.| +|`supplementalGroups`|`Array< integer >`|A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. 
If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.| |`sysctls`|`Array<`[`Sysctl`](#sysctl)`>`|Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.| |`windowsOptions`|[`WindowsSecurityContextOptions`](#windowssecuritycontextoptions)|The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.| @@ -5005,9 +5008,9 @@ The pod this Toleration is attached to tolerates any taint that matches the trip ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`effect`|`string`|Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. Possible enum values: - `"NoExecute"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. - `"NoSchedule"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - `"PreferNoSchedule"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler.| +|`effect`|`string`|Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.| |`key`|`string`|Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.| -|`operator`|`string`|Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Possible enum values: - `"Equal"` - `"Exists"`| +|`operator`|`string`|Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.| |`tolerationSeconds`|`integer`|TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.| |`value`|`string`|Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.| @@ -5039,8 +5042,8 @@ PersistentVolumeClaim is a user's request for and claim to a persistent volume |`apiVersion`|`string`|APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources| |`kind`|`string`|Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| |`metadata`|[`ObjectMeta`](#objectmeta)|Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata| -|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`status`|[`PersistentVolumeClaimStatus`](#persistentvolumeclaimstatus)|Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`spec`|[`PersistentVolumeClaimSpec`](#persistentvolumeclaimspec)|spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`status`|[`PersistentVolumeClaimStatus`](#persistentvolumeclaimstatus)|status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| ## Volume @@ -5067,36 +5070,36 @@ Volume represents a named volume in a pod that may be accessed by any container ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`awsElasticBlockStore`|[`AWSElasticBlockStoreVolumeSource`](#awselasticblockstorevolumesource)|AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`azureDisk`|[`AzureDiskVolumeSource`](#azurediskvolumesource)|AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.| -|`azureFile`|[`AzureFileVolumeSource`](#azurefilevolumesource)|AzureFile represents an Azure File Service mount on the host and bind mount to the pod.| -|`cephfs`|[`CephFSVolumeSource`](#cephfsvolumesource)|CephFS represents a Ceph FS mount on the host that shares a pod's lifetime| -|`cinder`|[`CinderVolumeSource`](#cindervolumesource)|Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`configMap`|[`ConfigMapVolumeSource`](#configmapvolumesource)|ConfigMap represents a configMap that should populate this volume| -|`csi`|[`CSIVolumeSource`](#csivolumesource)|CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).| -|`downwardAPI`|[`DownwardAPIVolumeSource`](#downwardapivolumesource)|DownwardAPI represents downward API about the pod that should populate this volume| -|`emptyDir`|[`EmptyDirVolumeSource`](#emptydirvolumesource)|EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| -|`ephemeral`|[`EphemeralVolumeSource`](#ephemeralvolumesource)|Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. 
Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time.| -|`fc`|[`FCVolumeSource`](#fcvolumesource)|FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.| -|`flexVolume`|[`FlexVolumeSource`](#flexvolumesource)|FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.| -|`flocker`|[`FlockerVolumeSource`](#flockervolumesource)|Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running| -|`gcePersistentDisk`|[`GCEPersistentDiskVolumeSource`](#gcepersistentdiskvolumesource)|GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|~~`gitRepo`~~|~~[`GitRepoVolumeSource`](#gitrepovolumesource)~~|~~GitRepo represents a git repository at a particular revision.~~ DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.| -|`glusterfs`|[`GlusterfsVolumeSource`](#glusterfsvolumesource)|Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md| -|`hostPath`|[`HostPathVolumeSource`](#hostpathvolumesource)|HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| -|`iscsi`|[`ISCSIVolumeSource`](#iscsivolumesource)|ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md| -|`name`|`string`|Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`nfs`|[`NFSVolumeSource`](#nfsvolumesource)|NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`persistentVolumeClaim`|[`PersistentVolumeClaimVolumeSource`](#persistentvolumeclaimvolumesource)|PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`photonPersistentDisk`|[`PhotonPersistentDiskVolumeSource`](#photonpersistentdiskvolumesource)|PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine| -|`portworxVolume`|[`PortworxVolumeSource`](#portworxvolumesource)|PortworxVolume represents a portworx volume attached and mounted on kubelets host machine| -|`projected`|[`ProjectedVolumeSource`](#projectedvolumesource)|Items for all in one resources secrets, configmaps, and downward API| -|`quobyte`|[`QuobyteVolumeSource`](#quobytevolumesource)|Quobyte represents a Quobyte mount on the host that shares a pod's lifetime| -|`rbd`|[`RBDVolumeSource`](#rbdvolumesource)|RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md| -|`scaleIO`|[`ScaleIOVolumeSource`](#scaleiovolumesource)|ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.| -|`secret`|[`SecretVolumeSource`](#secretvolumesource)|Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| -|`storageos`|[`StorageOSVolumeSource`](#storageosvolumesource)|StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.| -|`vsphereVolume`|[`VsphereVirtualDiskVolumeSource`](#vspherevirtualdiskvolumesource)|VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine| +|`awsElasticBlockStore`|[`AWSElasticBlockStoreVolumeSource`](#awselasticblockstorevolumesource)|awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`azureDisk`|[`AzureDiskVolumeSource`](#azurediskvolumesource)|azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.| +|`azureFile`|[`AzureFileVolumeSource`](#azurefilevolumesource)|azureFile represents an Azure File Service mount on the host and bind mount to the pod.| +|`cephfs`|[`CephFSVolumeSource`](#cephfsvolumesource)|cephFS represents a Ceph FS mount on the host that shares a pod's lifetime| +|`cinder`|[`CinderVolumeSource`](#cindervolumesource)|cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`configMap`|[`ConfigMapVolumeSource`](#configmapvolumesource)|configMap represents a configMap that should populate this volume| +|`csi`|[`CSIVolumeSource`](#csivolumesource)|csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).| +|`downwardAPI`|[`DownwardAPIVolumeSource`](#downwardapivolumesource)|downwardAPI represents downward API about the pod that should populate this volume| +|`emptyDir`|[`EmptyDirVolumeSource`](#emptydirvolumesource)|emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| +|`ephemeral`|[`EphemeralVolumeSource`](#ephemeralvolumesource)|ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. 
Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. A pod can use both types of ephemeral volumes and persistent volumes at the same time.| +|`fc`|[`FCVolumeSource`](#fcvolumesource)|fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.| +|`flexVolume`|[`FlexVolumeSource`](#flexvolumesource)|flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.| +|`flocker`|[`FlockerVolumeSource`](#flockervolumesource)|flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running| +|`gcePersistentDisk`|[`GCEPersistentDiskVolumeSource`](#gcepersistentdiskvolumesource)|gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|~~`gitRepo`~~|~~[`GitRepoVolumeSource`](#gitrepovolumesource)~~|~~gitRepo represents a git repository at a particular revision.~~ DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.| +|`glusterfs`|[`GlusterfsVolumeSource`](#glusterfsvolumesource)|glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md| +|`hostPath`|[`HostPathVolumeSource`](#hostpathvolumesource)|hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| +|`iscsi`|[`ISCSIVolumeSource`](#iscsivolumesource)|iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md| +|`name`|`string`|name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| +|`nfs`|[`NFSVolumeSource`](#nfsvolumesource)|nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`persistentVolumeClaim`|[`PersistentVolumeClaimVolumeSource`](#persistentvolumeclaimvolumesource)|persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`photonPersistentDisk`|[`PhotonPersistentDiskVolumeSource`](#photonpersistentdiskvolumesource)|photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine| +|`portworxVolume`|[`PortworxVolumeSource`](#portworxvolumesource)|portworxVolume represents a portworx volume attached and mounted on kubelets host machine| +|`projected`|[`ProjectedVolumeSource`](#projectedvolumesource)|projected items for all in one resources secrets, configmaps, and downward API| +|`quobyte`|[`QuobyteVolumeSource`](#quobytevolumesource)|quobyte represents a Quobyte mount on the host that shares a pod's lifetime| +|`rbd`|[`RBDVolumeSource`](#rbdvolumesource)|rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md| +|`scaleIO`|[`ScaleIOVolumeSource`](#scaleiovolumesource)|scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.| +|`secret`|[`SecretVolumeSource`](#secretvolumesource)|secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| +|`storageos`|[`StorageOSVolumeSource`](#storageosvolumesource)|storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.| +|`vsphereVolume`|[`VsphereVirtualDiskVolumeSource`](#vspherevirtualdiskvolumesource)|vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine| ## Time @@ -5473,24 +5476,25 @@ A single application container that you want to run within a pod. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`args`|`Array< string >`|Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| -|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`args`|`Array< string >`|Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| +|`command`|`Array< string >`|Entrypoint array. Not executed within a shell. 
The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell| |`env`|`Array<`[`EnvVar`](#envvar)`>`|List of environment variables to set in the container. Cannot be updated.| |`envFrom`|`Array<`[`EnvFromSource`](#envfromsource)`>`|List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.| -|`image`|`string`|Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| -|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `"Always"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `"IfNotPresent"` means that kubelet pulls if the image isn't present on disk. 
Container will fail if the image isn't present and the pull fails. - `"Never"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present| +|`image`|`string`|Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.| +|`imagePullPolicy`|`string`|Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images| |`lifecycle`|[`Lifecycle`](#lifecycle)|Actions that the management system should take in response to container lifecycle events. Cannot be updated.| |`livenessProbe`|[`Probe`](#probe)|Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| |`name`|`string`|Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.| -|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.| +|`ports`|`Array<`[`ContainerPort`](#containerport)`>`|List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. 
Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.| |`readinessProbe`|[`Probe`](#probe)|Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| +|`resizePolicy`|`Array<`[`ContainerResizePolicy`](#containerresizepolicy)`>`|Resources resize policy for the container.| |`resources`|[`ResourceRequirements`](#resourcerequirements)|Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| |`securityContext`|[`SecurityContext`](#securitycontext)|SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/| |`startupProbe`|[`Probe`](#probe)|StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| |`stdin`|`boolean`|Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.| |`stdinOnce`|`boolean`|Whether the container runtime should close the stdin channel after it has been opened by a single attach. 
When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false| |`terminationMessagePath`|`string`|Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.| -|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `"FallbackToLogsOnError"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - `"File"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.| +|`terminationMessagePolicy`|`string`|Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.| |`tty`|`boolean`|Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.| |`volumeDevices`|`Array<`[`VolumeDevice`](#volumedevice)`>`|volumeDevices is the list of block devices to be used by the container.| |`volumeMounts`|`Array<`[`VolumeMount`](#volumemount)`>`|Pod volumes to mount into the container's filesystem. Cannot be updated.| @@ -5618,7 +5622,7 @@ Probe describes a health check to be performed against a container to determine |:----------:|:----------:|---------------| |`exec`|[`ExecAction`](#execaction)|Exec specifies the action to take.| |`failureThreshold`|`integer`|Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.| -|`grpc`|[`GRPCAction`](#grpcaction)|GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.| +|`grpc`|[`GRPCAction`](#grpcaction)|GRPC specifies an action involving a GRPC port.| |`httpGet`|[`HTTPGetAction`](#httpgetaction)|HTTPGet specifies the http request to perform.| |`initialDelaySeconds`|`integer`|Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes| |`periodSeconds`|`integer`|How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.| @@ -5638,7 +5642,17 @@ ContainerPort represents a network port in a single container. 
|`hostIP`|`string`|What host IP to bind the external port to.| |`hostPort`|`integer`|Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.| |`name`|`string`|If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.| -|`protocol`|`string`|Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". Possible enum values: - `"SCTP"` is the SCTP protocol. - `"TCP"` is the TCP protocol. - `"UDP"` is the UDP protocol.| +|`protocol`|`string`|Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".| + +## ContainerResizePolicy + +ContainerResizePolicy represents resource resize policy for the container. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`resourceName`|`string`|Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.| +|`restartPolicy`|`string`|Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.| ## ResourceRequirements @@ -5673,8 +5687,9 @@ ResourceRequirements describes the compute resource requirements. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| +|`claims`|`Array<`[`ResourceClaim`](#resourceclaim)`>`|Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers.| |`limits`|[`Quantity`](#quantity)|Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| -|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| +|`requests`|[`Quantity`](#quantity)|Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/| ## SecurityContext @@ -5743,7 +5758,7 @@ ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the res |`manager`|`string`|Manager is an identifier of the workflow managing these fields.| |`operation`|`string`|Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.| |`subresource`|`string`|Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.| -|`time`|[`Time`](#time)|Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'| +|`time`|[`Time`](#time)|Time is the timestamp of when the ManagedFields entry was added. 
The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.| ## OwnerReference @@ -5753,11 +5768,11 @@ OwnerReference contains enough information to let you identify an owning object. | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`apiVersion`|`string`|API version of the referent.| -|`blockOwnerDeletion`|`boolean`|If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.| +|`blockOwnerDeletion`|`boolean`|If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.| |`controller`|`boolean`|If true, this reference points to the managing controller.| |`kind`|`string`|Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds| -|`name`|`string`|Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names| -|`uid`|`string`|UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids| +|`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names| +|`uid`|`string`|UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids| ## NodeAffinity @@ -5825,7 +5840,7 @@ SeccompProfile defines a pod/container's seccomp profile settings. Only one prof | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`localhostProfile`|`string`|localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost".| -|`type`|`string`|type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Possible enum values: - `"Localhost"` indicates a profile defined in a file on the node should be used. The file's location relative to /seccomp. - `"RuntimeDefault"` represents the default container runtime seccomp profile. - `"Unconfined"` indicates no seccomp profile is applied (A.K.A. unconfined).| +|`type`|`string`|type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.| ## Sysctl @@ -6218,14 +6233,14 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`accessModes`|`Array< string >`|AccessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| -|`dataSource`|[`TypedLocalObjectReference`](#typedlocalobjectreference)|This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.| -|`dataSourceRef`|[`TypedLocalObjectReference`](#typedlocalobjectreference)|Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.| -|`resources`|[`ResourceRequirements`](#resourcerequirements)|Resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources| -|`selector`|[`LabelSelector`](#labelselector)|A label query over volumes to consider for binding.| -|`storageClassName`|`string`|Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1| +|`accessModes`|`Array< string >`|accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| +|`dataSource`|[`TypedLocalObjectReference`](#typedlocalobjectreference)|dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.| +|`dataSourceRef`|[`TypedObjectReference`](#typedobjectreference)|dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. 
This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| +|`resources`|[`ResourceRequirements`](#resourcerequirements)|resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources| +|`selector`|[`LabelSelector`](#labelselector)|selector is a label query over volumes to consider for binding.| +|`storageClassName`|`string`|storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1| |`volumeMode`|`string`|volumeMode defines what type of volume is required by the claim. 
Value of Filesystem is implied when not included in claim spec.| -|`volumeName`|`string`|VolumeName is the binding reference to the PersistentVolume backing this claim.| +|`volumeName`|`string`|volumeName is the binding reference to the PersistentVolume backing this claim.| ## PersistentVolumeClaimStatus @@ -6234,12 +6249,12 @@ PersistentVolumeClaimStatus is the current status of a persistent volume claim. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`accessModes`|`Array< string >`|AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| -|`allocatedResources`|[`Quantity`](#quantity)|The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| -|`capacity`|[`Quantity`](#quantity)|Represents the actual resources of the underlying volume.| -|`conditions`|`Array<`[`PersistentVolumeClaimCondition`](#persistentvolumeclaimcondition)`>`|Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.| -|`phase`|`string`|Phase represents the current phase of PersistentVolumeClaim. 
Possible enum values: - `"Bound"` used for PersistentVolumeClaims that are bound - `"Lost"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - `"Pending"` used for PersistentVolumeClaims that are not yet bound| -|`resizeStatus`|`string`|ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| +|`accessModes`|`Array< string >`|accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1| +|`allocatedResources`|[`Quantity`](#quantity)|allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| +|`capacity`|[`Quantity`](#quantity)|capacity represents the actual resources of the underlying volume.| +|`conditions`|`Array<`[`PersistentVolumeClaimCondition`](#persistentvolumeclaimcondition)`>`|conditions is the current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.| +|`phase`|`string`|phase represents the current phase of PersistentVolumeClaim.| +|`resizeStatus`|`string`|resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.| ## AWSElasticBlockStoreVolumeSource @@ -6248,10 +6263,10 @@ Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`partition`|`integer`|The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).| -|`readOnly`|`boolean`|Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| -|`volumeID`|`string`|Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).| +|`readOnly`|`boolean`|readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| +|`volumeID`|`string`|volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore| ## AzureDiskVolumeSource @@ -6260,12 +6275,12 @@ AzureDisk represents an Azure Data Disk mount on the host and bind mount to the ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`cachingMode`|`string`|Host Caching mode: None, Read Only, Read Write.| -|`diskName`|`string`|The Name of the data disk in the blob storage| -|`diskURI`|`string`|The URI the data disk in the blob storage| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`kind`|`string`|Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared| -|`readOnly`|`boolean`|Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`cachingMode`|`string`|cachingMode is the Host Caching mode: None, Read Only, Read Write.| +|`diskName`|`string`|diskName is the Name of the data disk in the blob storage| +|`diskURI`|`string`|diskURI is the URI of data disk in the blob storage| +|`fsType`|`string`|fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`kind`|`string`|kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared| +|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| ## AzureFileVolumeSource @@ -6274,9 +6289,9 @@ AzureFile represents an Azure File Service mount on the host and bind mount to t ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`readOnly`|`boolean`|Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretName`|`string`|the name of secret that contains Azure Storage Account Name and Key| -|`shareName`|`string`|Share Name| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretName`|`string`|secretName is the name of secret that contains Azure Storage Account Name and Key| +|`shareName`|`string`|shareName is the azure share Name| ## CephFSVolumeSource @@ -6285,12 +6300,12 @@ Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volum ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`monitors`|`Array< string >`|Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`path`|`string`|Optional: Used as the mounted root, rather than the full Ceph tree, default is /| -|`readOnly`|`boolean`|Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`secretFile`|`string`|Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| -|`user`|`string`|Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`monitors`|`Array< string >`|monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`path`|`string`|path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /| +|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`secretFile`|`string`|secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| +|`user`|`string`|user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it| ## CinderVolumeSource @@ -6299,10 +6314,10 @@ Represents a cinder volume resource in Openstack. A Cinder volume must exist bef ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`readOnly`|`boolean`|Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|Optional: points to a secret object containing parameters used to connect to OpenStack.| -|`volumeID`|`string`|volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.| +|`volumeID`|`string`|volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md| ## ConfigMapVolumeSource @@ -6317,10 +6332,10 @@ Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data fi ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`defaultMode`|`integer`|Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`defaultMode`|`integer`|defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| |`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the ConfigMap or its keys must be defined| +|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| ## CSIVolumeSource @@ -6329,11 +6344,11 @@ Represents a source location of a volume to mount, managed by an external CSI dr ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`driver`|`string`|Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.| -|`fsType`|`string`|Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.| -|`nodePublishSecretRef`|[`LocalObjectReference`](#localobjectreference)|NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secret references are passed.| -|`readOnly`|`boolean`|Specifies a read-only configuration for the volume. Defaults to false (read/write).| -|`volumeAttributes`|`Map< string , string >`|VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.| +|`driver`|`string`|driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.| +|`fsType`|`string`|fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.| +|`nodePublishSecretRef`|[`LocalObjectReference`](#localobjectreference)|nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.| +|`readOnly`|`boolean`|readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).| +|`volumeAttributes`|`Map< string , string >`|volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.| ## DownwardAPIVolumeSource @@ -6364,8 +6379,8 @@ Represents an empty directory for a pod. Empty directory volumes support ownersh ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`medium`|`string`|What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| -|`sizeLimit`|[`Quantity`](#quantity)|Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir| +|`medium`|`string`|medium represents what type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| +|`sizeLimit`|[`Quantity`](#quantity)|sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir| ## EphemeralVolumeSource @@ -6383,11 +6398,11 @@ Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`lun`|`integer`|Optional: FC target lun number| -|`readOnly`|`boolean`|Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`targetWWNs`|`Array< string >`|Optional: FC target worldwide names (WWNs)| -|`wwids`|`Array< string >`|Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`lun`|`integer`|lun is Optional: FC target lun number| +|`readOnly`|`boolean`|readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`targetWWNs`|`Array< string >`|targetWWNs is Optional: FC target worldwide names (WWNs)| +|`wwids`|`Array< string >`|wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.| ## FlexVolumeSource @@ -6396,11 +6411,11 @@ FlexVolume represents a generic volume resource that is provisioned/attached usi ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`driver`|`string`|Driver is the name of the driver to use for this volume.| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.| -|`options`|`Map< string , string >`|Optional: Extra command options if any.| -|`readOnly`|`boolean`|Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. 
If the secret object contains more than one secret, all secrets are passed to the plugin scripts.| +|`driver`|`string`|driver is the name of the driver to use for this volume.| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.| +|`options`|`Map< string , string >`|options is Optional: this field holds extra command options if any.| +|`readOnly`|`boolean`|readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.| ## FlockerVolumeSource @@ -6409,8 +6424,8 @@ Represents a Flocker volume mounted by the Flocker agent. One and only one of da ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`datasetName`|`string`|Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated| -|`datasetUUID`|`string`|UUID of the dataset. This is unique identifier of a Flocker dataset| +|`datasetName`|`string`|datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated| +|`datasetUUID`|`string`|datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset| ## GCEPersistentDiskVolumeSource @@ -6419,10 +6434,10 @@ Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must ex ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`partition`|`integer`|The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`pdName`|`string`|Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| -|`readOnly`|`boolean`|ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`fsType`|`string`|fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`partition`|`integer`|partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`pdName`|`string`|pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk| ## GitRepoVolumeSource @@ -6431,9 +6446,9 @@ Represents a volume that is populated with the contents of a git repository. Git ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`directory`|`string`|Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.| -|`repository`|`string`|Repository URL| -|`revision`|`string`|Commit hash for the specified revision.| +|`directory`|`string`|directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.| +|`repository`|`string`|repository is the URL| +|`revision`|`string`|revision is the commit hash for the specified revision.| ## GlusterfsVolumeSource @@ -6442,9 +6457,9 @@ Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`endpoints`|`string`|EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| -|`path`|`string`|Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| -|`readOnly`|`boolean`|ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| +|`endpoints`|`string`|endpoints is the endpoint name that details Glusterfs topology. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| +|`path`|`string`|path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| +|`readOnly`|`boolean`|readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod| ## HostPathVolumeSource @@ -6453,8 +6468,8 @@ Represents a host path mapped into a pod. Host path volumes do not support owner ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`path`|`string`|Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| -|`type`|`string`|Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| +|`path`|`string`|path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| +|`type`|`string`|type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath| ## ISCSIVolumeSource @@ -6463,17 +6478,17 @@ Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`chapAuthDiscovery`|`boolean`|whether support iSCSI Discovery CHAP authentication| -|`chapAuthSession`|`boolean`|whether support iSCSI Session CHAP authentication| -|`fsType`|`string`|Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi| -|`initiatorName`|`string`|Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.| -|`iqn`|`string`|Target iSCSI Qualified Name.| -|`iscsiInterface`|`string`|iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).| -|`lun`|`integer`|iSCSI Target Lun number.| -|`portals`|`Array< string >`|iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| -|`readOnly`|`boolean`|ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|CHAP Secret for iSCSI target and initiator authentication| -|`targetPortal`|`string`|iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| +|`chapAuthDiscovery`|`boolean`|chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication| +|`chapAuthSession`|`boolean`|chapAuthSession defines whether support iSCSI Session CHAP authentication| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi| +|`initiatorName`|`string`|initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.| +|`iqn`|`string`|iqn is the target iSCSI Qualified Name.| +|`iscsiInterface`|`string`|iscsiInterface is the interface Name that uses an iSCSI transport. 
Defaults to 'default' (tcp).| +|`lun`|`integer`|lun represents iSCSI Target Lun number.| +|`portals`|`Array< string >`|portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is the CHAP Secret for iSCSI target and initiator authentication| +|`targetPortal`|`string`|targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).| ## NFSVolumeSource @@ -6482,9 +6497,9 @@ Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not sup ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`path`|`string`|Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`readOnly`|`boolean`|ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| -|`server`|`string`|Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`path`|`string`|path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`readOnly`|`boolean`|readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| +|`server`|`string`|server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs| ## PersistentVolumeClaimVolumeSource @@ -6499,8 +6514,8 @@ PersistentVolumeClaimVolumeSource references the user's PVC in the same namespac ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`claimName`|`string`|ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| -|`readOnly`|`boolean`|Will force the ReadOnly setting in VolumeMounts. Default false.| +|`claimName`|`string`|claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims| +|`readOnly`|`boolean`|readOnly Will force the ReadOnly setting in VolumeMounts. Default false.| ## PhotonPersistentDiskVolumeSource @@ -6509,8 +6524,8 @@ Represents a Photon Controller persistent disk resource. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`pdID`|`string`|ID that identifies Photon Controller persistent disk| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`pdID`|`string`|pdID is the ID that identifies Photon Controller persistent disk| ## PortworxVolumeSource @@ -6519,9 +6534,9 @@ PortworxVolumeSource represents a Portworx volume resource. 
### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.| -|`readOnly`|`boolean`|Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`volumeID`|`string`|VolumeID uniquely identifies a Portworx volume| +|`fsType`|`string`|fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`volumeID`|`string`|volumeID uniquely identifies a Portworx volume| ## ProjectedVolumeSource @@ -6530,8 +6545,8 @@ Represents a projected volume source ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`defaultMode`|`integer`|Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`sources`|`Array<`[`VolumeProjection`](#volumeprojection)`>`|list of volume projections| +|`defaultMode`|`integer`|defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`sources`|`Array<`[`VolumeProjection`](#volumeprojection)`>`|sources is the list of volume projections| ## QuobyteVolumeSource @@ -6540,12 +6555,12 @@ Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`group`|`string`|Group to map volume access to Default is no group| -|`readOnly`|`boolean`|ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.| -|`registry`|`string`|Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes| -|`tenant`|`string`|Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin| -|`user`|`string`|User to map volume access to Defaults to serivceaccount user| -|`volume`|`string`|Volume is a string that references an already created Quobyte volume by name.| +|`group`|`string`|group to map volume access to Default is no group| +|`readOnly`|`boolean`|readOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.| +|`registry`|`string`|registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes| +|`tenant`|`string`|tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin| +|`user`|`string`|user to map volume access to Defaults to serivceaccount user| +|`volume`|`string`|volume is a string that references an already created Quobyte volume by name.| ## RBDVolumeSource @@ -6554,14 +6569,14 @@ Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volu ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd| -|`image`|`string`|The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`keyring`|`string`|Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`monitors`|`Array< string >`|A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`pool`|`string`|The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`readOnly`|`boolean`|ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| -|`user`|`string`|The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`fsType`|`string`|fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd| +|`image`|`string`|image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`keyring`|`string`|keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`monitors`|`Array< string >`|monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`pool`|`string`|pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`readOnly`|`boolean`|readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| +|`user`|`string`|user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it| ## ScaleIOVolumeSource @@ -6570,16 +6585,16 @@ ScaleIOVolumeSource represents a persistent ScaleIO volume ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Default is "xfs".| -|`gateway`|`string`|The host address of the ScaleIO API Gateway.| -|`protectionDomain`|`string`|The name of the ScaleIO Protection Domain for the configured storage.| -|`readOnly`|`boolean`|Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.| -|`sslEnabled`|`boolean`|Flag to enable/disable SSL communication with Gateway, default false| -|`storageMode`|`string`|Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.| -|`storagePool`|`string`|The ScaleIO Storage Pool associated with the protection domain.| -|`system`|`string`|The name of the storage system as configured in ScaleIO.| -|`volumeName`|`string`|The name of a volume already created in the ScaleIO system that is associated with this volume source.| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".| +|`gateway`|`string`|gateway is the host address of the ScaleIO API Gateway.| +|`protectionDomain`|`string`|protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.| +|`readOnly`|`boolean`|readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.| +|`sslEnabled`|`boolean`|sslEnabled Flag enable/disable SSL communication with Gateway, default false| +|`storageMode`|`string`|storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.| +|`storagePool`|`string`|storagePool is the ScaleIO Storage Pool associated with the protection domain.| +|`system`|`string`|system is the name of the storage system as configured in ScaleIO.| +|`volumeName`|`string`|volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.| ## SecretVolumeSource @@ -6596,10 +6611,10 @@ Adapts a Secret into a volume. The contents of the target Secret's Data field wi ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`defaultMode`|`integer`|Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| -|`optional`|`boolean`|Specify whether the Secret or its keys must be defined| -|`secretName`|`string`|Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| +|`defaultMode`|`integer`|defaultMode is Optional: mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`optional`|`boolean`|optional field specify whether the Secret or its keys must be defined| +|`secretName`|`string`|secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret| ## StorageOSVolumeSource @@ -6608,11 +6623,11 @@ Represents a StorageOS persistent volume resource. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| -|`readOnly`|`boolean`|Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| -|`secretRef`|[`LocalObjectReference`](#localobjectreference)|SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.| -|`volumeName`|`string`|VolumeName is the human-readable name of the StorageOS volume. 
Volume names are only unique within a namespace.| -|`volumeNamespace`|`string`|VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.| +|`fsType`|`string`|fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`readOnly`|`boolean`|readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.| +|`secretRef`|[`LocalObjectReference`](#localobjectreference)|secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.| +|`volumeName`|`string`|volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.| +|`volumeNamespace`|`string`|volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.| ## VsphereVirtualDiskVolumeSource @@ -6621,10 +6636,10 @@ Represents a vSphere volume resource. ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`fsType`|`string`|Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified.| -|`storagePolicyID`|`string`|Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.| -|`storagePolicyName`|`string`|Storage Policy Based Management (SPBM) profile name.| -|`volumePath`|`string`|Path that identifies vSphere volume vmdk| +|`fsType`|`string`|fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.| +|`storagePolicyID`|`string`|storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.| +|`storagePolicyName`|`string`|storagePolicyName is the storage Policy Based Management (SPBM) profile name.| +|`volumePath`|`string`|volumePath is the path that identifies vSphere volume vmdk| ## LabelSelectorRequirement @@ -6776,7 +6791,7 @@ HTTPGetAction describes an action based on HTTP Get requests. |`httpHeaders`|`Array<`[`HTTPHeader`](#httpheader)`>`|Custom headers to set in the request. HTTP allows repeated headers.| |`path`|`string`|Path to access on the HTTP server.| |`port`|[`IntOrString`](#intorstring)|Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.| -|`scheme`|`string`|Scheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - `"HTTP"` means that the scheme used will be http:// - `"HTTPS"` means that the scheme used will be https://| +|`scheme`|`string`|Scheme to use for connecting to the host. Defaults to HTTP.| ## TCPSocketAction @@ -6788,9 +6803,18 @@ TCPSocketAction describes an action based on opening a socket |`host`|`string`|Optional: Host name to connect to, defaults to the pod IP.| |`port`|[`IntOrString`](#intorstring)|Number or name of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME.| +## ResourceClaim + +ResourceClaim references one entry in PodSpec.ResourceClaims. + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`name`|`string`|Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.| + ## Quantity -Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the "" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= "+" | "-" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | "" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= "e" | "E" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in "canonical form". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as "1500m" 1.5Gi will be serialized as "1536Mi" Note that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. +Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the "" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= "+" | "-" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | "" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= "e" | "E" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in "canonical form". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as "1500m" - 1.5Gi will be serialized as "1536Mi" Note that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.
Examples with this field (click to open) @@ -6853,8 +6877,8 @@ Defines a set of pods (namely those matching the labelSelector relative to the g | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`labelSelector`|[`LabelSelector`](#labelselector)|A label query over a set of resources, in this case pods.| -|`namespaceSelector`|[`LabelSelector`](#labelselector)|A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.| -|`namespaces`|`Array< string >`|namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"| +|`namespaceSelector`|[`LabelSelector`](#labelselector)|A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces.| +|`namespaces`|`Array< string >`|namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace".| |`topologyKey`|`string`|This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.| ## TypedLocalObjectReference @@ -6868,19 +6892,31 @@ TypedLocalObjectReference contains enough information to let you locate the type |`kind`|`string`|Kind is the type of resource being referenced| |`name`|`string`|Name is the name of resource being referenced| +## TypedObjectReference + +_No description available_ + +### Fields +| Field Name | Field Type | Description | +|:----------:|:----------:|---------------| +|`apiGroup`|`string`|APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.| +|`kind`|`string`|Kind is the type of resource being referenced| +|`name`|`string`|Name is the name of resource being referenced| +|`namespace`|`string`|Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. 
(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.| + ## PersistentVolumeClaimCondition -PersistentVolumeClaimCondition contails details about state of pvc +PersistentVolumeClaimCondition contains details about state of pvc ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`lastProbeTime`|[`Time`](#time)|Last time we probed the condition.| -|`lastTransitionTime`|[`Time`](#time)|Last time the condition transitioned from one status to another.| -|`message`|`string`|Human-readable message indicating details about last transition.| -|`reason`|`string`|Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.| +|`lastProbeTime`|[`Time`](#time)|lastProbeTime is the time we probed the condition.| +|`lastTransitionTime`|[`Time`](#time)|lastTransitionTime is the time the condition transitioned from one status to another.| +|`message`|`string`|message is the human-readable message indicating details about last transition.| +|`reason`|`string`|reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized.| |`status`|`string`|_No description available_| -|`type`|`string`| Possible enum values: - `"FileSystemResizePending"` - controller resize is finished and a file system resize is pending on node - `"Resizing"` - a user trigger resize of pvc has been started| +|`type`|`string`|_No description available_| ## KeyToPath @@ -6889,9 +6925,9 @@ Maps a string key to a path within a volume. 
### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`key`|`string`|The key to project.| -|`mode`|`integer`|Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| -|`path`|`string`|The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.| +|`key`|`string`|key is the key to project.| +|`mode`|`integer`|mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.| +|`path`|`string`|path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.| ## DownwardAPIVolumeFile @@ -6922,10 +6958,10 @@ Projection that may be projected along with other supported volume types ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`configMap`|[`ConfigMapProjection`](#configmapprojection)|information about the configMap data to project| -|`downwardAPI`|[`DownwardAPIProjection`](#downwardapiprojection)|information about the downwardAPI data to project| -|`secret`|[`SecretProjection`](#secretprojection)|information about the secret data to project| -|`serviceAccountToken`|[`ServiceAccountTokenProjection`](#serviceaccounttokenprojection)|information about the serviceAccountToken data to project| +|`configMap`|[`ConfigMapProjection`](#configmapprojection)|configMap information about the configMap data to project| +|`downwardAPI`|[`DownwardAPIProjection`](#downwardapiprojection)|downwardAPI information about the downwardAPI data to project| +|`secret`|[`SecretProjection`](#secretprojection)|secret information about the secret data to project| +|`serviceAccountToken`|[`ServiceAccountTokenProjection`](#serviceaccounttokenprojection)|serviceAccountToken is information about the serviceAccountToken data to project| ## ObjectFieldSelector @@ -6955,7 +6991,7 @@ HTTPHeader describes a custom header to be used in HTTP probes ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`name`|`string`|The header field name| +|`name`|`string`|The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.| |`value`|`string`|The header field value| ## NodeSelectorTerm @@ -6981,9 +7017,9 @@ Adapts a ConfigMap into a projected volume. 
The contents of the target ConfigMap ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| |`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the ConfigMap or its keys must be defined| +|`optional`|`boolean`|optional specify whether the ConfigMap or its keys must be defined| ## DownwardAPIProjection @@ -7009,9 +7045,9 @@ Adapts a secret into a projected volume. The contents of the target Secret's Dat ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| +|`items`|`Array<`[`KeyToPath`](#keytopath)`>`|items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.| |`name`|`string`|Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names| -|`optional`|`boolean`|Specify whether the Secret or its key must be defined| +|`optional`|`boolean`|optional field specify whether the Secret or its key must be defined| ## ServiceAccountTokenProjection @@ -7020,9 +7056,9 @@ ServiceAccountTokenProjection represents a projected service account token volum ### Fields | Field Name | Field Type | Description | |:----------:|:----------:|---------------| -|`audience`|`string`|Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.| -|`expirationSeconds`|`integer`|ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. 
The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.| -|`path`|`string`|Path is the path relative to the mount point of the file to project the token into.| +|`audience`|`string`|audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.| +|`expirationSeconds`|`integer`|expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.| +|`path`|`string`|path is the path relative to the mount point of the file to project the token into.| ## NodeSelectorRequirement @@ -7032,5 +7068,5 @@ A node selector requirement is a selector that contains values, a key, and an op | Field Name | Field Type | Description | |:----------:|:----------:|---------------| |`key`|`string`|The label key that the selector applies to.| -|`operator`|`string`|Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - `"DoesNotExist"` - `"Exists"` - `"Gt"` - `"In"` - `"Lt"` - `"NotIn"`| +|`operator`|`string`|Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.| |`values`|`Array< string >`|An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.| diff --git a/go.mod b/go.mod index a6f584affc3e..bc14dbca4894 100644 --- a/go.mod +++ b/go.mod @@ -64,15 +64,15 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe google.golang.org/grpc v1.61.0 gopkg.in/go-playground/webhooks.v5 v5.17.0 - k8s.io/api v0.26.15 - k8s.io/apimachinery v0.26.15 - k8s.io/cli-runtime v0.26.15 - k8s.io/client-go v0.26.15 + k8s.io/api v0.27.12 + k8s.io/apimachinery v0.27.12 + k8s.io/cli-runtime v0.27.12 + k8s.io/client-go v0.27.12 k8s.io/gengo v0.0.0-20220902162205-c0856e24416d - k8s.io/klog/v2 v2.80.1 - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 - k8s.io/kubectl v0.26.15 - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d + k8s.io/klog/v2 v2.90.1 + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f + k8s.io/kubectl v0.27.12 + k8s.io/utils v0.0.0-20230209194617-a36077c30491 sigs.k8s.io/yaml v1.4.0 ) @@ -118,10 +118,11 @@ require ( go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/tools v0.16.1 // indirect google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect + sigs.k8s.io/kustomize/kustomize/v5 v5.0.1 // indirect ) require ( @@ -186,7 +187,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fvbommel/sortorder v1.0.1 // indirect - github.com/go-errors/errors v1.0.1 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect 
github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -283,13 +284,12 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.26.15 // indirect - k8s.io/component-helpers v0.26.15 // indirect - k8s.io/metrics v0.26.15 // indirect + k8s.io/component-base v0.27.12 // indirect + k8s.io/component-helpers v0.27.12 // indirect + k8s.io/metrics v0.27.12 // indirect moul.io/http2curl/v2 v2.3.0 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 66d56f1ae64f..e7e25fb3b3e3 100644 --- a/go.sum +++ b/go.sum @@ -281,8 +281,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod 
h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -320,6 +320,7 @@ github.com/go-openapi/swag v0.22.6/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmrid github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe h1:zn8tqiUbec4wR94o7Qj3LZCAT6uGobhEgnDRg6isG5U= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -404,6 +405,8 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -659,8 +662,8 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -706,8 +709,8 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= @@ -914,8 +917,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1085,8 +1088,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1184,20 +1187,20 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.17.8/go.mod h1:N++Llhs8kCixMUoCaXXAyMMPbo8dDVnh+IQ36xZV2/0= -k8s.io/api v0.26.15 h1:tjMERUjIwkq+2UtPZL5ZbSsLkpxUv4gXWZfV5lQl+Og= -k8s.io/api v0.26.15/go.mod h1:CtWOrFl8VLCTLolRlhbBxo4fy83tjCLEtYa5pMubIe0= +k8s.io/api v0.27.12 h1:Qprj/nuFj4xjbsAuJ05F1sCHs1d0x33n/Ni0oAVEFDo= +k8s.io/api v0.27.12/go.mod h1:PNRL63V26JzKe2++ho6W/YRp3k9XG7nirN4J7WRy5gY= k8s.io/apimachinery v0.17.8/go.mod h1:Lg8zZ5iC/O8UjCqW6DNhcQG2m4TdjF9kwG3891OWbbA= -k8s.io/apimachinery v0.26.15 h1:GPxeERYBSqSZlj3xIkX4L6mBjzZ9q8JPnJ+Vj15qe+g= -k8s.io/apimachinery v0.26.15/go.mod h1:O/uIhIOWuy6ndHqQ6qbkjD7OgeMhVtlk8+Z66ZcmJQc= -k8s.io/cli-runtime v0.26.15 h1:+y3am0YLVBEfe4je5taxVUM8EKQKnUqzmXBdn3Ytxko= -k8s.io/cli-runtime v0.26.15/go.mod h1:AXABAdbXP0xeIJV4SpJ1caMR7FY8GjXTxMsJ5/1iMF0= +k8s.io/apimachinery v0.27.12 h1:Nt20vwaAHcZsM4WdkOtLaDeBJHg9QJW8JyOOEn6xzRA= +k8s.io/apimachinery v0.27.12/go.mod h1:5/SjQaDYQgZOv8kuzNMzmNGrqh4/iyknC5yWjxU9ll8= +k8s.io/cli-runtime v0.27.12 h1:9iMbnrIuKe6B+0iIgMJueXace7e3lEQwU5hOrZM1nBo= +k8s.io/cli-runtime v0.27.12/go.mod h1:UWZLKn9tn/Z451VAVUTGb1bg2Zz7pSf2u7AJPLcJE7g= k8s.io/client-go v0.17.8/go.mod h1:SJsDS64AAtt9VZyeaQMb4Ck5etCitZ/FwajWdzua5eY= -k8s.io/client-go v0.26.15 h1:A2Yav2v+VZQfpEsf5ESFp2Lqq5XACKBDrwkG+jEtOg0= -k8s.io/client-go v0.26.15/go.mod h1:KJs7snLEyKPlypqTQG/ngcaqE6h3/6qTvVHDViRL+iI= -k8s.io/component-base v0.26.15 h1:32XJyv5fo/lbDZhYU1HyISXTgdSUkbW5cO4DhfR6Y/8= -k8s.io/component-base v0.26.15/go.mod h1:9V+nBzUtTNtRuYfYmQQEhuKrjhL80i2l6F2H2qUsHAI= -k8s.io/component-helpers v0.26.15 h1:2ln2voQ6oLMUKzksr29g47iE1Y0rLdB+2KICF8F1f5Q= -k8s.io/component-helpers v0.26.15/go.mod h1:UwLS62rpGU8sIJfnBWChicMdf14y9hdu5DXicHay4Hk= +k8s.io/client-go v0.27.12 h1:ouIB3ZitBjmBWh/9auP4erVl8AXkheqcmbH7FSFa7DI= +k8s.io/client-go v0.27.12/go.mod h1:h3X7RGr5s9Wm4NtI06Bzt3am4Kj6aXuZQcP7OD+48Sk= +k8s.io/component-base v0.27.12 
h1:2/ooM9/gNxS2fZuRnLj3TWWg7KnEYmRj321du10waZA= +k8s.io/component-base v0.27.12/go.mod h1:SPA6ABqK2HDRuzMjoEEkj7ciDvK5bUpdHtvZQwdi/xM= +k8s.io/component-helpers v0.27.12 h1:bT7oDIXvDHK3nI/yrv2/N9nYmVuttwtR3DKhngbL7nQ= +k8s.io/component-helpers v0.27.12/go.mod h1:Wn2l/7jpmZbC1VpmUTeLYh0Wsp20SFfIYNNJAHSVX7g= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1206,18 +1209,18 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.26.15 h1:Q118/ZVWmUYEm6Iod8MKuxQFwTBBopBogGq5tkudvhg= -k8s.io/kubectl v0.26.15/go.mod h1:JgN3H70qdFjI/93T91gVOAsSExxNmccoCQLDNX//aYw= -k8s.io/metrics v0.26.15 h1:U+FLqs8aFMVBWycx/lZn8nSBP5lfdmQMCqG288XOsbs= -k8s.io/metrics v0.26.15/go.mod h1:zUiNijWF/4zP8s+BXjPNEaswdwE7g2VSJr5lT3eBMYs= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi 
v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kubectl v0.27.12 h1:BYRMNxmrazTNp5/CH7QReaMjoeQIESMv34fpDuC1czE= +k8s.io/kubectl v0.27.12/go.mod h1:IKQvyGhgZBY6KEXOcHoV8Ac7NIt/9PqI6MVrtmhi4NA= +k8s.io/metrics v0.27.12 h1:FSrOhJ1LdvMCk+5doBzs+hu6IGyxp41nEjnW0KkaEPI= +k8s.io/metrics v0.27.12/go.mod h1:moXvBzculdWcjF8eNPi7PN4Xk9kHXN/MOsLlGe3B/lw= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= modernc.org/b v1.0.4/go.mod h1:Oqc2xtmGT0tvBUsPZIanirLhxBCQZhM7Lu3TlzBj9w8= modernc.org/b v1.1.0/go.mod h1:yF+wmBAFjebNdVqZNTeNfmnLaLqq91wozvDLcuXz+ck= modernc.org/db v1.0.8/go.mod h1:L8Az96H46DF2+BGeaS6+WiEqLORR2sjp0yBn6LA/lAQ= @@ -1257,14 +1260,14 @@ modernc.org/zappy v1.0.9/go.mod h1:y2c4Hv5jzyBP179SxNmx5H/BM6cVgNIXPQv2bCeR6IM= modernc.org/zappy v1.1.0/go.mod h1:cxC0dWAgZuyMsJ+KL3ZBgo3twyKGBB/0By/umSZE2bQ= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.7 h1:cDW6AVMl6t/SLuQaezMET8hgnadZGIAr8tUrxFVOrpg= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod 
h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA= +sigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw= +sigs.k8s.io/kustomize/kustomize/v5 v5.0.1 h1:HWXbyKDNwGqol+s/sMNr/vnfNME/EoMdEraP4ZkUQek= +sigs.k8s.io/kustomize/kustomize/v5 v5.0.1/go.mod h1:Q8o+soB41Pn1y26eXzG9cniuECDpTJe2eKOA1fENCU8= +sigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA= +sigs.k8s.io/kustomize/kyaml v0.14.1/go.mod h1:AN1/IpawKilWD7V+YvQwRGUvuUOOWpjsHu6uHwonSF4= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= diff --git a/manifests/base/crds/full/argoproj.io_clusterworkflowtemplates.yaml b/manifests/base/crds/full/argoproj.io_clusterworkflowtemplates.yaml index 78a364fb9e80..415dfb32eaa5 100644 --- a/manifests/base/crds/full/argoproj.io_clusterworkflowtemplates.yaml +++ b/manifests/base/crds/full/argoproj.io_clusterworkflowtemplates.yaml @@ -3184,6 +3184,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -3794,6 +3807,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + 
items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -6307,6 +6333,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -8783,6 +8822,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -9458,6 +9510,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -11670,6 +11735,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -12280,6 +12358,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -14793,6 +14884,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - 
restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17269,6 +17373,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17944,6 +18061,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: diff --git a/manifests/base/crds/full/argoproj.io_cronworkflows.yaml b/manifests/base/crds/full/argoproj.io_cronworkflows.yaml index 3bd8d118f6a0..d7d85370d380 100644 --- a/manifests/base/crds/full/argoproj.io_cronworkflows.yaml +++ b/manifests/base/crds/full/argoproj.io_cronworkflows.yaml @@ -3216,6 +3216,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -3826,6 +3839,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -6339,6 +6365,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -8815,6 +8854,19 @@ spec: format: 
int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -9490,6 +9542,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -11702,6 +11767,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -12312,6 +12390,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -14825,6 +14916,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17301,6 +17405,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17976,6 +18093,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + 
restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: diff --git a/manifests/base/crds/full/argoproj.io_workflows.yaml b/manifests/base/crds/full/argoproj.io_workflows.yaml index d0c5aa874f0d..e1a3ef2e8946 100644 --- a/manifests/base/crds/full/argoproj.io_workflows.yaml +++ b/manifests/base/crds/full/argoproj.io_workflows.yaml @@ -3198,6 +3198,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -3808,6 +3821,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -6321,6 +6347,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -8797,6 +8836,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -9472,6 +9524,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: 
claims: @@ -11684,6 +11749,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -12294,6 +12372,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -14807,6 +14898,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17283,6 +17387,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17958,6 +18075,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -23981,6 +24111,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -24591,6 +24734,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + 
properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -27104,6 +27260,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -29580,6 +29749,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -30255,6 +30437,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -34399,6 +34594,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -35009,6 +35217,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -37522,6 +37743,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - 
restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -39998,6 +40232,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -40673,6 +40920,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -42885,6 +43145,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -43495,6 +43768,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -46008,6 +46294,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -48484,6 +48783,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ 
-49159,6 +49471,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: diff --git a/manifests/base/crds/full/argoproj.io_workflowtasksets.yaml b/manifests/base/crds/full/argoproj.io_workflowtasksets.yaml index 9f4475e344ee..e4c15a75d3ba 100644 --- a/manifests/base/crds/full/argoproj.io_workflowtasksets.yaml +++ b/manifests/base/crds/full/argoproj.io_workflowtasksets.yaml @@ -1254,6 +1254,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -1864,6 +1877,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -4377,6 +4403,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -6853,6 +6892,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -7528,6 +7580,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: 
string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: diff --git a/manifests/base/crds/full/argoproj.io_workflowtemplates.yaml b/manifests/base/crds/full/argoproj.io_workflowtemplates.yaml index e971cf10d51c..1e073b757ac2 100644 --- a/manifests/base/crds/full/argoproj.io_workflowtemplates.yaml +++ b/manifests/base/crds/full/argoproj.io_workflowtemplates.yaml @@ -3183,6 +3183,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -3793,6 +3806,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -6306,6 +6332,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -8782,6 +8821,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -9457,6 +9509,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + 
x-kubernetes-list-type: atomic resources: properties: claims: @@ -11669,6 +11734,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -12279,6 +12357,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -14792,6 +14883,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17268,6 +17372,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: @@ -17943,6 +18060,19 @@ spec: format: int32 type: integer type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: properties: claims: diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 8f025f9528fc..b966af7f48db 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -1791,6 +1791,25 @@ func schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref common.ReferenceCallbac Ref: 
ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, + "resizePolicy": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resources resize policy for the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerResizePolicy"), + }, + }, + }, + }, + }, "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -1922,7 +1941,7 @@ func schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref common.ReferenceCallbac }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, } } @@ -5537,6 +5556,25 @@ func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallba Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, + "resizePolicy": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resources resize policy for the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: 
ref("k8s.io/api/core/v1.ContainerResizePolicy"), + }, + }, + }, + }, + }, "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -5662,7 +5700,7 @@ func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallba }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, } } @@ -6648,6 +6686,25 @@ func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallbac Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, + "resizePolicy": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resources resize policy for the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerResizePolicy"), + }, + }, + }, + }, + }, "volumeMounts": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -6772,7 +6829,7 @@ func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallbac }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", 
"k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.ContainerResizePolicy", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, } } diff --git a/pkg/plugins/executor/swagger.yml b/pkg/plugins/executor/swagger.yml index ef4de7072265..710501f32dbe 100644 --- a/pkg/plugins/executor/swagger.yml +++ b/pkg/plugins/executor/swagger.yml @@ -729,6 +729,15 @@ definitions: type: array readinessProbe: $ref: '#/definitions/Probe' + resizePolicy: + description: |- + Resources resize policy for the container. + +featureGate=InPlacePodVerticalScaling + +optional + +listType=atomic + items: + $ref: '#/definitions/ContainerResizePolicy' + type: array resources: $ref: '#/definitions/ResourceRequirements' securityContext: @@ -897,6 +906,15 @@ definitions: type: array readinessProbe: $ref: '#/definitions/Probe' + resizePolicy: + description: |- + Resources resize policy for the container. + +featureGate=InPlacePodVerticalScaling + +optional + +listType=atomic + items: + $ref: '#/definitions/ContainerResizePolicy' + type: array resources: $ref: '#/definitions/ResourceRequirements' securityContext: @@ -1001,6 +1019,14 @@ definitions: $ref: '#/definitions/Protocol' title: ContainerPort represents a network port in a single container. type: object + ContainerResizePolicy: + properties: + resourceName: + $ref: '#/definitions/ResourceName' + restartPolicy: + $ref: '#/definitions/ResourceResizeRestartPolicy' + title: ContainerResizePolicy represents resource resize policy for the container. 
+ type: object ContainerSetRetryStrategy: properties: duration: @@ -2425,7 +2451,7 @@ definitions: name: description: |- Name of the referent. - More info: http://kubernetes.io/docs/user-guide/identifiers#names + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string uid: $ref: '#/definitions/UID' @@ -2510,7 +2536,7 @@ definitions: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations +optional type: object creationTimestamp: @@ -2576,7 +2602,7 @@ definitions: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +optional type: object managedFields: @@ -2600,7 +2626,7 @@ definitions: automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/identifiers#names + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names +optional type: string namespace: @@ -2612,7 +2638,7 @@ definitions: Must be a DNS_LABEL. Cannot be updated. - More info: http://kubernetes.io/docs/user-guide/namespaces + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces +optional type: string ownerReferences: @@ -3246,6 +3272,9 @@ definitions: $ref: '#/definitions/Quantity' title: ResourceList is a set of (resource name, quantity) pairs. 
type: object + ResourceName: + title: ResourceName is the name identifying various resources in a ResourceList. + type: string ResourceRequirements: properties: claims: @@ -3271,6 +3300,10 @@ definitions: $ref: '#/definitions/ResourceList' title: ResourceRequirements describes the compute resource requirements. type: object + ResourceResizeRestartPolicy: + title: ResourceResizeRestartPolicy specifies how to handle container resource + resize. + type: string ResourceTemplate: description: ResourceTemplate is a template subtype to manipulate kubernetes resources properties: @@ -3571,6 +3604,15 @@ definitions: type: array readinessProbe: $ref: '#/definitions/Probe' + resizePolicy: + description: |- + Resources resize policy for the container. + +featureGate=InPlacePodVerticalScaling + +optional + +listType=atomic + items: + $ref: '#/definitions/ContainerResizePolicy' + type: array resources: $ref: '#/definitions/ResourceRequirements' securityContext: @@ -4389,6 +4431,15 @@ definitions: type: array readinessProbe: $ref: '#/definitions/Probe' + resizePolicy: + description: |- + Resources resize policy for the container. + +featureGate=InPlacePodVerticalScaling + +optional + +listType=atomic + items: + $ref: '#/definitions/ContainerResizePolicy' + type: array resources: $ref: '#/definitions/ResourceRequirements' securityContext: diff --git a/sdks/java/client/docs/AWSElasticBlockStoreVolumeSource.md b/sdks/java/client/docs/AWSElasticBlockStoreVolumeSource.md index cc450923116c..c80e3411e458 100644 --- a/sdks/java/client/docs/AWSElasticBlockStoreVolumeSource.md +++ b/sdks/java/client/docs/AWSElasticBlockStoreVolumeSource.md @@ -8,10 +8,10 @@ Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] -**partition** | **Integer** | The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). | [optional] -**readOnly** | **Boolean** | Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] -**volumeID** | **String** | Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | +**fsType** | **String** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] +**partition** | **Integer** | partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). | [optional] +**readOnly** | **Boolean** | readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] +**volumeID** | **String** | volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | diff --git a/sdks/java/client/docs/ArchivedWorkflowServiceApi.md b/sdks/java/client/docs/ArchivedWorkflowServiceApi.md index a58d43147006..8dada938c5a3 100644 --- a/sdks/java/client/docs/ArchivedWorkflowServiceApi.md +++ b/sdks/java/client/docs/ArchivedWorkflowServiceApi.md @@ -225,7 +225,7 @@ Name | Type | Description | Notes # **archivedWorkflowServiceListArchivedWorkflowLabelValues** -> IoArgoprojWorkflowV1alpha1LabelValues archivedWorkflowServiceListArchivedWorkflowLabelValues(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, namespace) +> IoArgoprojWorkflowV1alpha1LabelValues archivedWorkflowServiceListArchivedWorkflowLabelValues(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, namespace) @@ -260,9 +260,10 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional String namespace = "namespace_example"; // String | try { - IoArgoprojWorkflowV1alpha1LabelValues result = apiInstance.archivedWorkflowServiceListArchivedWorkflowLabelValues(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, namespace); + IoArgoprojWorkflowV1alpha1LabelValues result = apiInstance.archivedWorkflowServiceListArchivedWorkflowLabelValues(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, namespace); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling ArchivedWorkflowServiceApi#archivedWorkflowServiceListArchivedWorkflowLabelValues"); @@ -288,6 +289,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] **namespace** | **String**| | [optional] ### Return type @@ -311,7 +313,7 @@ Name | Type | Description | Notes # **archivedWorkflowServiceListArchivedWorkflows** -> IoArgoprojWorkflowV1alpha1WorkflowList archivedWorkflowServiceListArchivedWorkflows(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, namePrefix, namespace) +> IoArgoprojWorkflowV1alpha1WorkflowList archivedWorkflowServiceListArchivedWorkflows(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, namePrefix, namespace) @@ -346,10 +348,11 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
+ Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional String namePrefix = "namePrefix_example"; // String | String namespace = "namespace_example"; // String | try { - IoArgoprojWorkflowV1alpha1WorkflowList result = apiInstance.archivedWorkflowServiceListArchivedWorkflows(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, namePrefix, namespace); + IoArgoprojWorkflowV1alpha1WorkflowList result = apiInstance.archivedWorkflowServiceListArchivedWorkflows(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, namePrefix, namespace); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling ArchivedWorkflowServiceApi#archivedWorkflowServiceListArchivedWorkflows"); @@ -375,6 +378,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] **namePrefix** | **String**| | [optional] **namespace** | **String**| | [optional] diff --git a/sdks/java/client/docs/AzureDiskVolumeSource.md b/sdks/java/client/docs/AzureDiskVolumeSource.md index 740ac11d6488..b2bcecadc9ee 100644 --- a/sdks/java/client/docs/AzureDiskVolumeSource.md +++ b/sdks/java/client/docs/AzureDiskVolumeSource.md @@ -8,12 +8,12 @@ AzureDisk represents an Azure Data Disk mount on the host and bind mount to the Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**cachingMode** | **String** | Host Caching mode: None, Read Only, Read Write. | [optional] -**diskName** | **String** | The Name of the data disk in the blob storage | -**diskURI** | **String** | The URI the data disk in the blob storage | -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**kind** | **String** | Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared | [optional] -**readOnly** | **Boolean** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**cachingMode** | **String** | cachingMode is the Host Caching mode: None, Read Only, Read Write. | [optional] +**diskName** | **String** | diskName is the Name of the data disk in the blob storage | +**diskURI** | **String** | diskURI is the URI of data disk in the blob storage | +**fsType** | **String** | fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
| [optional] +**kind** | **String** | kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared | [optional] +**readOnly** | **Boolean** | readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] diff --git a/sdks/java/client/docs/AzureFileVolumeSource.md b/sdks/java/client/docs/AzureFileVolumeSource.md index 541eb41abe84..8477c5cbd228 100644 --- a/sdks/java/client/docs/AzureFileVolumeSource.md +++ b/sdks/java/client/docs/AzureFileVolumeSource.md @@ -8,9 +8,9 @@ AzureFile represents an Azure File Service mount on the host and bind mount to t Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**readOnly** | **Boolean** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] -**secretName** | **String** | the name of secret that contains Azure Storage Account Name and Key | -**shareName** | **String** | Share Name | +**readOnly** | **Boolean** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**secretName** | **String** | secretName is the name of secret that contains Azure Storage Account Name and Key | +**shareName** | **String** | shareName is the azure share Name | diff --git a/sdks/java/client/docs/CSIVolumeSource.md b/sdks/java/client/docs/CSIVolumeSource.md index 9b9d026c9623..1d9f73369a16 100644 --- a/sdks/java/client/docs/CSIVolumeSource.md +++ b/sdks/java/client/docs/CSIVolumeSource.md @@ -8,11 +8,11 @@ Represents a source location of a volume to mount, managed by an external CSI dr Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**driver** | **String** | Driver is the name of the CSI driver that handles this volume. 
Consult with your admin for the correct name as registered in the cluster. | -**fsType** | **String** | Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. | [optional] +**driver** | **String** | driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. | +**fsType** | **String** | fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. | [optional] **nodePublishSecretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**readOnly** | **Boolean** | Specifies a read-only configuration for the volume. Defaults to false (read/write). | [optional] -**volumeAttributes** | **Map<String, String>** | VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. | [optional] +**readOnly** | **Boolean** | readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). | [optional] +**volumeAttributes** | **Map<String, String>** | volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. 
| [optional] diff --git a/sdks/java/client/docs/CephFSVolumeSource.md b/sdks/java/client/docs/CephFSVolumeSource.md index 0b8159841f93..30c086f19d3c 100644 --- a/sdks/java/client/docs/CephFSVolumeSource.md +++ b/sdks/java/client/docs/CephFSVolumeSource.md @@ -8,12 +8,12 @@ Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volum Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**monitors** | **List<String>** | Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | -**path** | **String** | Optional: Used as the mounted root, rather than the full Ceph tree, default is / | [optional] -**readOnly** | **Boolean** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] -**secretFile** | **String** | Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**monitors** | **List<String>** | monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | +**path** | **String** | path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / | [optional] +**readOnly** | **Boolean** | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**secretFile** | **String** | secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**user** | **String** | Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**user** | **String** | user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] diff --git a/sdks/java/client/docs/CinderVolumeSource.md b/sdks/java/client/docs/CinderVolumeSource.md index 8d3c198c4c43..7a5c9f5477fc 100644 --- a/sdks/java/client/docs/CinderVolumeSource.md +++ b/sdks/java/client/docs/CinderVolumeSource.md @@ -8,10 +8,10 @@ Represents a cinder volume resource in Openstack. A Cinder volume must exist bef Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] -**readOnly** | **Boolean** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] +**fsType** | **String** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] +**readOnly** | **Boolean** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**volumeID** | **String** | volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | +**volumeID** | **String** | volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | diff --git a/sdks/java/client/docs/ClusterWorkflowTemplateServiceApi.md b/sdks/java/client/docs/ClusterWorkflowTemplateServiceApi.md index ba5d566f3623..756816ce7fda 100644 --- a/sdks/java/client/docs/ClusterWorkflowTemplateServiceApi.md +++ b/sdks/java/client/docs/ClusterWorkflowTemplateServiceApi.md @@ -300,7 +300,7 @@ Name | Type | Description | Notes # **clusterWorkflowTemplateServiceListClusterWorkflowTemplates** -> IoArgoprojWorkflowV1alpha1ClusterWorkflowTemplateList clusterWorkflowTemplateServiceListClusterWorkflowTemplates(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojWorkflowV1alpha1ClusterWorkflowTemplateList clusterWorkflowTemplateServiceListClusterWorkflowTemplates(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -335,8 +335,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout 
for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional try { - IoArgoprojWorkflowV1alpha1ClusterWorkflowTemplateList result = apiInstance.clusterWorkflowTemplateServiceListClusterWorkflowTemplates(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojWorkflowV1alpha1ClusterWorkflowTemplateList result = apiInstance.clusterWorkflowTemplateServiceListClusterWorkflowTemplates(listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling ClusterWorkflowTemplateServiceApi#clusterWorkflowTemplateServiceListClusterWorkflowTemplates"); @@ -362,6 +363,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/java/client/docs/ConfigMapProjection.md b/sdks/java/client/docs/ConfigMapProjection.md index 73ca594b9885..b1e667eea3ed 100644 --- a/sdks/java/client/docs/ConfigMapProjection.md +++ b/sdks/java/client/docs/ConfigMapProjection.md @@ -8,9 +8,9 @@ Adapts a ConfigMap into a projected volume. The contents of the target ConfigMa Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**items** | [**List<KeyToPath>**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**items** | [**List<KeyToPath>**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **String** | Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **Boolean** | Specify whether the ConfigMap or its keys must be defined | [optional] +**optional** | **Boolean** | optional specify whether the ConfigMap or its keys must be defined | [optional] diff --git a/sdks/java/client/docs/ConfigMapVolumeSource.md b/sdks/java/client/docs/ConfigMapVolumeSource.md index d092fdb3eeb7..1eca8a6475fe 100644 --- a/sdks/java/client/docs/ConfigMapVolumeSource.md +++ b/sdks/java/client/docs/ConfigMapVolumeSource.md @@ -8,10 +8,10 @@ Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data f Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**defaultMode** | **Integer** | Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**items** | [**List<KeyToPath>**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**defaultMode** | **Integer** | defaultMode is optional: mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**items** | [**List<KeyToPath>**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **String** | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **Boolean** | Specify whether the ConfigMap or its keys must be defined | [optional] +**optional** | **Boolean** | optional specify whether the ConfigMap or its keys must be defined | [optional] diff --git a/sdks/java/client/docs/ContainerResizePolicy.md b/sdks/java/client/docs/ContainerResizePolicy.md new file mode 100644 index 000000000000..45eebacae2dd --- /dev/null +++ b/sdks/java/client/docs/ContainerResizePolicy.md @@ -0,0 +1,15 @@ + + +# ContainerResizePolicy + +ContainerResizePolicy represents resource resize policy for the container. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**resourceName** | **String** | Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. 
| +**restartPolicy** | **String** | Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. | + + + diff --git a/sdks/java/client/docs/CronWorkflowServiceApi.md b/sdks/java/client/docs/CronWorkflowServiceApi.md index 9607ebbc7a90..5109d04bb48f 100644 --- a/sdks/java/client/docs/CronWorkflowServiceApi.md +++ b/sdks/java/client/docs/CronWorkflowServiceApi.md @@ -310,7 +310,7 @@ Name | Type | Description | Notes # **cronWorkflowServiceListCronWorkflows** -> IoArgoprojWorkflowV1alpha1CronWorkflowList cronWorkflowServiceListCronWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojWorkflowV1alpha1CronWorkflowList cronWorkflowServiceListCronWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -346,8 +346,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional try { - IoArgoprojWorkflowV1alpha1CronWorkflowList result = apiInstance.cronWorkflowServiceListCronWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojWorkflowV1alpha1CronWorkflowList result = apiInstance.cronWorkflowServiceListCronWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling CronWorkflowServiceApi#cronWorkflowServiceListCronWorkflows"); @@ -374,6 +375,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/java/client/docs/EmptyDirVolumeSource.md b/sdks/java/client/docs/EmptyDirVolumeSource.md index 10ccd3c4f8e0..5ad8348b63fe 100644 --- a/sdks/java/client/docs/EmptyDirVolumeSource.md +++ b/sdks/java/client/docs/EmptyDirVolumeSource.md @@ -8,8 +8,8 @@ Represents an empty directory for a pod. 
Empty directory volumes support ownersh Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**medium** | **String** | What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir | [optional] -**sizeLimit** | **String** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. | [optional] +**medium** | **String** | medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir | [optional] +**sizeLimit** | **String** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. 
| .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. 
| [optional] diff --git a/sdks/java/client/docs/EventServiceApi.md b/sdks/java/client/docs/EventServiceApi.md index f734d99d5ee5..3561c346f076 100644 --- a/sdks/java/client/docs/EventServiceApi.md +++ b/sdks/java/client/docs/EventServiceApi.md @@ -10,7 +10,7 @@ Method | HTTP request | Description # **eventServiceListWorkflowEventBindings** -> IoArgoprojWorkflowV1alpha1WorkflowEventBindingList eventServiceListWorkflowEventBindings(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojWorkflowV1alpha1WorkflowEventBindingList eventServiceListWorkflowEventBindings(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -46,8 +46,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - IoArgoprojWorkflowV1alpha1WorkflowEventBindingList result = apiInstance.eventServiceListWorkflowEventBindings(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojWorkflowV1alpha1WorkflowEventBindingList result = apiInstance.eventServiceListWorkflowEventBindings(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling EventServiceApi#eventServiceListWorkflowEventBindings"); @@ -74,6 +75,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type diff --git a/sdks/java/client/docs/EventSourceServiceApi.md b/sdks/java/client/docs/EventSourceServiceApi.md index eb4b1557d3fa..c9b82cac3245 100644 --- a/sdks/java/client/docs/EventSourceServiceApi.md +++ b/sdks/java/client/docs/EventSourceServiceApi.md @@ -333,7 +333,7 @@ Name | Type | Description | Notes # **eventSourceServiceListEventSources** -> IoArgoprojEventsV1alpha1EventSourceList eventSourceServiceListEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojEventsV1alpha1EventSourceList eventSourceServiceListEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -369,8 +369,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - IoArgoprojEventsV1alpha1EventSourceList result = apiInstance.eventSourceServiceListEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojEventsV1alpha1EventSourceList result = apiInstance.eventSourceServiceListEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling EventSourceServiceApi#eventSourceServiceListEventSources"); @@ -397,6 +398,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type @@ -491,7 +493,7 @@ Name | Type | Description | Notes # **eventSourceServiceWatchEventSources** -> StreamResultOfEventsourceEventSourceWatchEvent eventSourceServiceWatchEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> StreamResultOfEventsourceEventSourceWatchEvent eventSourceServiceWatchEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -527,8 +529,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
+ Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - StreamResultOfEventsourceEventSourceWatchEvent result = apiInstance.eventSourceServiceWatchEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + StreamResultOfEventsourceEventSourceWatchEvent result = apiInstance.eventSourceServiceWatchEventSources(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling EventSourceServiceApi#eventSourceServiceWatchEventSources"); @@ -555,6 +558,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/java/client/docs/FCVolumeSource.md b/sdks/java/client/docs/FCVolumeSource.md index b135eaa1e1b7..c5e9bfb32175 100644 --- a/sdks/java/client/docs/FCVolumeSource.md +++ b/sdks/java/client/docs/FCVolumeSource.md @@ -8,11 +8,11 @@ Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**lun** | **Integer** | Optional: FC target lun number | [optional] -**readOnly** | **Boolean** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] -**targetWWNs** | **List<String>** | Optional: FC target worldwide names (WWNs) | [optional] -**wwids** | **List<String>** | Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. | [optional] +**fsType** | **String** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**lun** | **Integer** | lun is Optional: FC target lun number | [optional] +**readOnly** | **Boolean** | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**targetWWNs** | **List<String>** | targetWWNs is Optional: FC target worldwide names (WWNs) | [optional] +**wwids** | **List<String>** | wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. | [optional] diff --git a/sdks/java/client/docs/FlexVolumeSource.md b/sdks/java/client/docs/FlexVolumeSource.md index 231274a6790e..9278a1c8405c 100644 --- a/sdks/java/client/docs/FlexVolumeSource.md +++ b/sdks/java/client/docs/FlexVolumeSource.md @@ -8,10 +8,10 @@ FlexVolume represents a generic volume resource that is provisioned/attached usi Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**driver** | **String** | Driver is the name of the driver to use for this volume. | -**fsType** | **String** | Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. | [optional] -**options** | **Map<String, String>** | Optional: Extra command options if any. | [optional] -**readOnly** | **Boolean** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**driver** | **String** | driver is the name of the driver to use for this volume. | +**fsType** | **String** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. | [optional] +**options** | **Map<String, String>** | options is Optional: this field holds extra command options if any. | [optional] +**readOnly** | **Boolean** | readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] diff --git a/sdks/java/client/docs/FlockerVolumeSource.md b/sdks/java/client/docs/FlockerVolumeSource.md index 20d03eb28da1..d2342d76a7d5 100644 --- a/sdks/java/client/docs/FlockerVolumeSource.md +++ b/sdks/java/client/docs/FlockerVolumeSource.md @@ -8,8 +8,8 @@ Represents a Flocker volume mounted by the Flocker agent. One and only one of da Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**datasetName** | **String** | Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated | [optional] -**datasetUUID** | **String** | UUID of the dataset. 
This is unique identifier of a Flocker dataset | [optional] +**datasetName** | **String** | datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated | [optional] +**datasetUUID** | **String** | datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset | [optional] diff --git a/sdks/java/client/docs/GCEPersistentDiskVolumeSource.md b/sdks/java/client/docs/GCEPersistentDiskVolumeSource.md index 04fb14213bf1..75610ac504e3 100644 --- a/sdks/java/client/docs/GCEPersistentDiskVolumeSource.md +++ b/sdks/java/client/docs/GCEPersistentDiskVolumeSource.md @@ -8,10 +8,10 @@ Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must e Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] -**partition** | **Integer** | The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] -**pdName** | **String** | Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | -**readOnly** | **Boolean** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**fsType** | **String** | fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**partition** | **Integer** | partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**pdName** | **String** | pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | +**readOnly** | **Boolean** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] diff --git a/sdks/java/client/docs/GitRepoVolumeSource.md b/sdks/java/client/docs/GitRepoVolumeSource.md index b43f031887ce..4775a54678c6 100644 --- a/sdks/java/client/docs/GitRepoVolumeSource.md +++ b/sdks/java/client/docs/GitRepoVolumeSource.md @@ -8,9 +8,9 @@ Represents a volume that is populated with the contents of a git repository. Git Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**directory** | **String** | Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. 
| [optional] -**repository** | **String** | Repository URL | -**revision** | **String** | Commit hash for the specified revision. | [optional] +**directory** | **String** | directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. | [optional] +**repository** | **String** | repository is the URL | +**revision** | **String** | revision is the commit hash for the specified revision. | [optional] diff --git a/sdks/java/client/docs/GlusterfsVolumeSource.md b/sdks/java/client/docs/GlusterfsVolumeSource.md index e5d6cd338c17..1c990d29f9be 100644 --- a/sdks/java/client/docs/GlusterfsVolumeSource.md +++ b/sdks/java/client/docs/GlusterfsVolumeSource.md @@ -8,9 +8,9 @@ Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**endpoints** | **String** | EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | -**path** | **String** | Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | -**readOnly** | **Boolean** | ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | [optional] +**endpoints** | **String** | endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | +**path** | **String** | path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | +**readOnly** | **Boolean** | readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | [optional] diff --git a/sdks/java/client/docs/HTTPGetAction.md b/sdks/java/client/docs/HTTPGetAction.md index 5e823e946649..8853cce486ed 100644 --- a/sdks/java/client/docs/HTTPGetAction.md +++ b/sdks/java/client/docs/HTTPGetAction.md @@ -12,16 +12,7 @@ Name | Type | Description | Notes **httpHeaders** | [**List<HTTPHeader>**](HTTPHeader.md) | Custom headers to set in the request. HTTP allows repeated headers. | [optional] **path** | **String** | Path to access on the HTTP server. | [optional] **port** | **String** | | -**scheme** | [**SchemeEnum**](#SchemeEnum) | Scheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - `\"HTTP\"` means that the scheme used will be http:// - `\"HTTPS\"` means that the scheme used will be https:// | [optional] - - - -## Enum: SchemeEnum - -Name | Value ----- | ----- -HTTP | "HTTP" -HTTPS | "HTTPS" +**scheme** | **String** | Scheme to use for connecting to the host. Defaults to HTTP. | [optional] diff --git a/sdks/java/client/docs/HTTPHeader.md b/sdks/java/client/docs/HTTPHeader.md index fda3427884b9..fb2c9bf0dedc 100644 --- a/sdks/java/client/docs/HTTPHeader.md +++ b/sdks/java/client/docs/HTTPHeader.md @@ -8,7 +8,7 @@ HTTPHeader describes a custom header to be used in HTTP probes Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **String** | The header field name | +**name** | **String** | The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. 
| **value** | **String** | The header field value | diff --git a/sdks/java/client/docs/HostPathVolumeSource.md b/sdks/java/client/docs/HostPathVolumeSource.md index 22e8b198174b..f59dbd285144 100644 --- a/sdks/java/client/docs/HostPathVolumeSource.md +++ b/sdks/java/client/docs/HostPathVolumeSource.md @@ -8,8 +8,8 @@ Represents a host path mapped into a pod. Host path volumes do not support owner Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**path** | **String** | Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | -**type** | **String** | Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | [optional] +**path** | **String** | path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | +**type** | **String** | type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | [optional] diff --git a/sdks/java/client/docs/ISCSIVolumeSource.md b/sdks/java/client/docs/ISCSIVolumeSource.md index 4911d2ad0f15..a9069d20927a 100644 --- a/sdks/java/client/docs/ISCSIVolumeSource.md +++ b/sdks/java/client/docs/ISCSIVolumeSource.md @@ -8,17 +8,17 @@ Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**chapAuthDiscovery** | **Boolean** | whether support iSCSI Discovery CHAP authentication | [optional] -**chapAuthSession** | **Boolean** | whether support iSCSI Session CHAP authentication | [optional] -**fsType** | **String** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi | [optional] -**initiatorName** | **String** | Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. | [optional] -**iqn** | **String** | Target iSCSI Qualified Name. | -**iscsiInterface** | **String** | iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). | [optional] -**lun** | **Integer** | iSCSI Target Lun number. | -**portals** | **List<String>** | iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | [optional] -**readOnly** | **Boolean** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. | [optional] +**chapAuthDiscovery** | **Boolean** | chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication | [optional] +**chapAuthSession** | **Boolean** | chapAuthSession defines whether support iSCSI Session CHAP authentication | [optional] +**fsType** | **String** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi | [optional] +**initiatorName** | **String** | initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. | [optional] +**iqn** | **String** | iqn is the target iSCSI Qualified Name. | +**iscsiInterface** | **String** | iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). 
| [optional] +**lun** | **Integer** | lun represents iSCSI Target Lun number. | +**portals** | **List<String>** | portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | [optional] +**readOnly** | **Boolean** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**targetPortal** | **String** | iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | +**targetPortal** | **String** | targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | diff --git a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md index 65acdaac8a2a..ea693551a614 100644 --- a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md +++ b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md @@ -19,6 +19,7 @@ Name | Type | Description | Notes **name** | **String** | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | **ports** | [**List<io.kubernetes.client.openapi.models.V1ContainerPort>**](io.kubernetes.client.openapi.models.V1ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. 
| [optional] **readinessProbe** | [**io.kubernetes.client.openapi.models.V1Probe**](io.kubernetes.client.openapi.models.V1Probe.md) | | [optional] +**resizePolicy** | [**List<ContainerResizePolicy>**](ContainerResizePolicy.md) | Resources resize policy for the container. | [optional] **resources** | [**io.kubernetes.client.openapi.models.V1ResourceRequirements**](io.kubernetes.client.openapi.models.V1ResourceRequirements.md) | | [optional] **securityContext** | [**io.kubernetes.client.openapi.models.V1SecurityContext**](io.kubernetes.client.openapi.models.V1SecurityContext.md) | | [optional] **startupProbe** | [**io.kubernetes.client.openapi.models.V1Probe**](io.kubernetes.client.openapi.models.V1Probe.md) | | [optional] diff --git a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md index d3d6b84ab07c..1868df81a649 100644 --- a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md +++ b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md @@ -19,6 +19,7 @@ Name | Type | Description | Notes **name** | **String** | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | [optional] **ports** | [**List<io.kubernetes.client.openapi.models.V1ContainerPort>**](io.kubernetes.client.openapi.models.V1ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. 
| [optional] **readinessProbe** | [**io.kubernetes.client.openapi.models.V1Probe**](io.kubernetes.client.openapi.models.V1Probe.md) | | [optional] +**resizePolicy** | [**List<ContainerResizePolicy>**](ContainerResizePolicy.md) | Resources resize policy for the container. | [optional] **resources** | [**io.kubernetes.client.openapi.models.V1ResourceRequirements**](io.kubernetes.client.openapi.models.V1ResourceRequirements.md) | | [optional] **securityContext** | [**io.kubernetes.client.openapi.models.V1SecurityContext**](io.kubernetes.client.openapi.models.V1SecurityContext.md) | | [optional] **source** | **String** | Source contains the source code of the script to execute | diff --git a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md index 03b0695a6818..3300bbbd1de9 100644 --- a/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md +++ b/sdks/java/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md @@ -20,6 +20,7 @@ Name | Type | Description | Notes **name** | **String** | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | **ports** | [**List<io.kubernetes.client.openapi.models.V1ContainerPort>**](io.kubernetes.client.openapi.models.V1ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. 
| [optional] **readinessProbe** | [**io.kubernetes.client.openapi.models.V1Probe**](io.kubernetes.client.openapi.models.V1Probe.md) | | [optional] +**resizePolicy** | [**List<ContainerResizePolicy>**](ContainerResizePolicy.md) | Resources resize policy for the container. | [optional] **resources** | [**io.kubernetes.client.openapi.models.V1ResourceRequirements**](io.kubernetes.client.openapi.models.V1ResourceRequirements.md) | | [optional] **securityContext** | [**io.kubernetes.client.openapi.models.V1SecurityContext**](io.kubernetes.client.openapi.models.V1SecurityContext.md) | | [optional] **startupProbe** | [**io.kubernetes.client.openapi.models.V1Probe**](io.kubernetes.client.openapi.models.V1Probe.md) | | [optional] diff --git a/sdks/java/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md b/sdks/java/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md index db398510aa12..a9594e722dce 100644 --- a/sdks/java/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md +++ b/sdks/java/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md @@ -11,6 +11,7 @@ Name | Type | Description | Notes **maxUnavailable** | **String** | | [optional] **minAvailable** | **String** | | [optional] **selector** | [**LabelSelector**](LabelSelector.md) | | [optional] +**unhealthyPodEvictionPolicy** | **String** | UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. 
AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). | [optional] diff --git a/sdks/java/client/docs/KeyToPath.md b/sdks/java/client/docs/KeyToPath.md index 6060943745d3..b8c7cb9bd398 100644 --- a/sdks/java/client/docs/KeyToPath.md +++ b/sdks/java/client/docs/KeyToPath.md @@ -8,9 +8,9 @@ Maps a string key to a path within a volume. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**key** | **String** | The key to project. | -**mode** | **Integer** | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**path** | **String** | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | +**key** | **String** | key is the key to project. | +**mode** | **Integer** | mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**path** | **String** | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | diff --git a/sdks/java/client/docs/NFSVolumeSource.md b/sdks/java/client/docs/NFSVolumeSource.md index b7f59eb2ce23..c0aace721912 100644 --- a/sdks/java/client/docs/NFSVolumeSource.md +++ b/sdks/java/client/docs/NFSVolumeSource.md @@ -8,9 +8,9 @@ Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not sup Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**path** | **String** | Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | -**readOnly** | **Boolean** | ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | [optional] -**server** | **String** | Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | +**path** | **String** | path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | +**readOnly** | **Boolean** | readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | [optional] +**server** | **String** | server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | diff --git a/sdks/java/client/docs/NodeSelectorRequirement.md b/sdks/java/client/docs/NodeSelectorRequirement.md index e99af5462852..87d5adedbe3f 100644 --- a/sdks/java/client/docs/NodeSelectorRequirement.md +++ b/sdks/java/client/docs/NodeSelectorRequirement.md @@ -9,21 +9,8 @@ A node selector requirement is a selector that contains values, a key, and an op Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **key** | **String** | The label key that the selector applies to. | -**operator** | [**OperatorEnum**](#OperatorEnum) | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - `\"DoesNotExist\"` - `\"Exists\"` - `\"Gt\"` - `\"In\"` - `\"Lt\"` - `\"NotIn\"` | +**operator** | **String** | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. | **values** | **List<String>** | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | [optional] -## Enum: OperatorEnum - -Name | Value ----- | ----- -DOESNOTEXIST | "DoesNotExist" -EXISTS | "Exists" -GT | "Gt" -IN | "In" -LT | "Lt" -NOTIN | "NotIn" - - - diff --git a/sdks/java/client/docs/OwnerReference.md b/sdks/java/client/docs/OwnerReference.md index 58fcfa803ba2..a3d146cdaacc 100644 --- a/sdks/java/client/docs/OwnerReference.md +++ b/sdks/java/client/docs/OwnerReference.md @@ -9,11 +9,11 @@ OwnerReference contains enough information to let you identify an owning object. 
Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **apiVersion** | **String** | API version of the referent. | -**blockOwnerDeletion** | **Boolean** | If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. | [optional] +**blockOwnerDeletion** | **Boolean** | If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. | [optional] **controller** | **Boolean** | If true, this reference points to the managing controller. | [optional] **kind** | **String** | Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | -**name** | **String** | Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names | -**uid** | **String** | UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids | +**name** | **String** | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names | +**uid** | **String** | UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids | diff --git a/sdks/java/client/docs/PersistentVolumeClaimCondition.md b/sdks/java/client/docs/PersistentVolumeClaimCondition.md index 132eefc2335d..68c8fecfcb25 100644 --- a/sdks/java/client/docs/PersistentVolumeClaimCondition.md +++ b/sdks/java/client/docs/PersistentVolumeClaimCondition.md @@ -2,7 +2,7 @@ # PersistentVolumeClaimCondition -PersistentVolumeClaimCondition contails details about state of pvc +PersistentVolumeClaimCondition contains details about state of pvc ## Properties @@ -10,19 +10,10 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **lastProbeTime** | **java.time.Instant** | | [optional] **lastTransitionTime** | **java.time.Instant** | | [optional] -**message** | **String** | Human-readable message indicating details about last transition. | [optional] -**reason** | **String** | Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. | [optional] +**message** | **String** | message is the human-readable message indicating details about last transition. | [optional] +**reason** | **String** | reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. 
| [optional] **status** | **String** | | -**type** | [**TypeEnum**](#TypeEnum) | Possible enum values: - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node - `\"Resizing\"` - a user trigger resize of pvc has been started | - - - -## Enum: TypeEnum - -Name | Value ----- | ----- -FILESYSTEMRESIZEPENDING | "FileSystemResizePending" -RESIZING | "Resizing" +**type** | **String** | | diff --git a/sdks/java/client/docs/PersistentVolumeClaimSpec.md b/sdks/java/client/docs/PersistentVolumeClaimSpec.md index 2e6f88ea5acc..f9d6dda56df2 100644 --- a/sdks/java/client/docs/PersistentVolumeClaimSpec.md +++ b/sdks/java/client/docs/PersistentVolumeClaimSpec.md @@ -8,14 +8,14 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**accessModes** | **List<String>** | AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] +**accessModes** | **List<String>** | accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] **dataSource** | [**TypedLocalObjectReference**](TypedLocalObjectReference.md) | | [optional] -**dataSourceRef** | [**TypedLocalObjectReference**](TypedLocalObjectReference.md) | | [optional] +**dataSourceRef** | [**TypedObjectReference**](TypedObjectReference.md) | | [optional] **resources** | [**io.kubernetes.client.openapi.models.V1ResourceRequirements**](io.kubernetes.client.openapi.models.V1ResourceRequirements.md) | | [optional] **selector** | [**LabelSelector**](LabelSelector.md) | | [optional] -**storageClassName** | **String** | Name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | [optional] +**storageClassName** | **String** | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | [optional] **volumeMode** | **String** | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | [optional] -**volumeName** | **String** | VolumeName is the binding reference to the PersistentVolume backing this claim. | [optional] +**volumeName** | **String** | volumeName is the binding reference to the PersistentVolume backing this claim. | [optional] diff --git a/sdks/java/client/docs/PersistentVolumeClaimStatus.md b/sdks/java/client/docs/PersistentVolumeClaimStatus.md index 14ac6f808d69..199ace11efb1 100644 --- a/sdks/java/client/docs/PersistentVolumeClaimStatus.md +++ b/sdks/java/client/docs/PersistentVolumeClaimStatus.md @@ -8,22 +8,12 @@ PersistentVolumeClaimStatus is the current status of a persistent volume claim. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**accessModes** | **List<String>** | AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] -**allocatedResources** | **Map<String, String>** | The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. 
If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] -**capacity** | **Map<String, String>** | Represents the actual resources of the underlying volume. | [optional] -**conditions** | [**List<PersistentVolumeClaimCondition>**](PersistentVolumeClaimCondition.md) | Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. | [optional] -**phase** | [**PhaseEnum**](#PhaseEnum) | Phase represents the current phase of PersistentVolumeClaim. Possible enum values: - `\"Bound\"` used for PersistentVolumeClaims that are bound - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound | [optional] -**resizeStatus** | **String** | ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] - - - -## Enum: PhaseEnum - -Name | Value ----- | ----- -BOUND | "Bound" -LOST | "Lost" -PENDING | "Pending" +**accessModes** | **List<String>** | accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] +**allocatedResources** | **Map<String, String>** | allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. 
It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] +**capacity** | **Map<String, String>** | capacity represents the actual resources of the underlying volume. | [optional] +**conditions** | [**List<PersistentVolumeClaimCondition>**](PersistentVolumeClaimCondition.md) | conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. | [optional] +**phase** | **String** | phase represents the current phase of PersistentVolumeClaim. | [optional] +**resizeStatus** | **String** | resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. 
| [optional] diff --git a/sdks/java/client/docs/PersistentVolumeClaimVolumeSource.md b/sdks/java/client/docs/PersistentVolumeClaimVolumeSource.md index 35234c4856d8..2988529ab734 100644 --- a/sdks/java/client/docs/PersistentVolumeClaimVolumeSource.md +++ b/sdks/java/client/docs/PersistentVolumeClaimVolumeSource.md @@ -8,8 +8,8 @@ PersistentVolumeClaimVolumeSource references the user's PVC in the same namespac Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**claimName** | **String** | ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims | -**readOnly** | **Boolean** | Will force the ReadOnly setting in VolumeMounts. Default false. | [optional] +**claimName** | **String** | claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims | +**readOnly** | **Boolean** | readOnly Will force the ReadOnly setting in VolumeMounts. Default false. | [optional] diff --git a/sdks/java/client/docs/PhotonPersistentDiskVolumeSource.md b/sdks/java/client/docs/PhotonPersistentDiskVolumeSource.md index bf5b014e1c85..a46db4df446a 100644 --- a/sdks/java/client/docs/PhotonPersistentDiskVolumeSource.md +++ b/sdks/java/client/docs/PhotonPersistentDiskVolumeSource.md @@ -8,8 +8,8 @@ Represents a Photon Controller persistent disk resource. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
| [optional] -**pdID** | **String** | ID that identifies Photon Controller persistent disk | +**fsType** | **String** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**pdID** | **String** | pdID is the ID that identifies Photon Controller persistent disk | diff --git a/sdks/java/client/docs/PodAffinityTerm.md b/sdks/java/client/docs/PodAffinityTerm.md index 72b6a8decc74..3e670df3c9a6 100644 --- a/sdks/java/client/docs/PodAffinityTerm.md +++ b/sdks/java/client/docs/PodAffinityTerm.md @@ -10,7 +10,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **labelSelector** | [**LabelSelector**](LabelSelector.md) | | [optional] **namespaceSelector** | [**LabelSelector**](LabelSelector.md) | | [optional] -**namespaces** | **List<String>** | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\" | [optional] +**namespaces** | **List<String>** | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". | [optional] **topologyKey** | **String** | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
| diff --git a/sdks/java/client/docs/PortworxVolumeSource.md b/sdks/java/client/docs/PortworxVolumeSource.md index eed8c93db372..816af97a00f8 100644 --- a/sdks/java/client/docs/PortworxVolumeSource.md +++ b/sdks/java/client/docs/PortworxVolumeSource.md @@ -8,9 +8,9 @@ PortworxVolumeSource represents a Portworx volume resource. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**readOnly** | **Boolean** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] -**volumeID** | **String** | VolumeID uniquely identifies a Portworx volume | +**fsType** | **String** | fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**readOnly** | **Boolean** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**volumeID** | **String** | volumeID uniquely identifies a Portworx volume | diff --git a/sdks/java/client/docs/ProjectedVolumeSource.md b/sdks/java/client/docs/ProjectedVolumeSource.md index 55dae3dc239d..842bac85a0ea 100644 --- a/sdks/java/client/docs/ProjectedVolumeSource.md +++ b/sdks/java/client/docs/ProjectedVolumeSource.md @@ -8,8 +8,8 @@ Represents a projected volume source Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**defaultMode** | **Integer** | Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**sources** | [**List<VolumeProjection>**](VolumeProjection.md) | list of volume projections | [optional] +**defaultMode** | **Integer** | defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**sources** | [**List<VolumeProjection>**](VolumeProjection.md) | sources is the list of volume projections | [optional] diff --git a/sdks/java/client/docs/QuobyteVolumeSource.md b/sdks/java/client/docs/QuobyteVolumeSource.md index 20bbe37c4104..58ec00066e72 100644 --- a/sdks/java/client/docs/QuobyteVolumeSource.md +++ b/sdks/java/client/docs/QuobyteVolumeSource.md @@ -8,12 +8,12 @@ Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**group** | **String** | Group to map volume access to Default is no group | [optional] -**readOnly** | **Boolean** | ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
| [optional] -**registry** | **String** | Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes | -**tenant** | **String** | Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin | [optional] -**user** | **String** | User to map volume access to Defaults to serivceaccount user | [optional] -**volume** | **String** | Volume is a string that references an already created Quobyte volume by name. | +**group** | **String** | group to map volume access to Default is no group | [optional] +**readOnly** | **Boolean** | readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. | [optional] +**registry** | **String** | registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes | +**tenant** | **String** | tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin | [optional] +**user** | **String** | user to map volume access to Defaults to serivceaccount user | [optional] +**volume** | **String** | volume is a string that references an already created Quobyte volume by name. | diff --git a/sdks/java/client/docs/RBDVolumeSource.md b/sdks/java/client/docs/RBDVolumeSource.md index 0690aef84cdf..5e34fc4bd237 100644 --- a/sdks/java/client/docs/RBDVolumeSource.md +++ b/sdks/java/client/docs/RBDVolumeSource.md @@ -8,14 +8,14 @@ Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volu Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd | [optional] -**image** | **String** | The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | -**keyring** | **String** | Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] -**monitors** | **List<String>** | A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | -**pool** | **String** | The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] -**readOnly** | **Boolean** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**fsType** | **String** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd | [optional] +**image** | **String** | image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | +**keyring** | **String** | keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**monitors** | **List<String>** | monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | +**pool** | **String** | pool is the rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**readOnly** | **Boolean** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**user** | **String** | The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**user** | **String** | user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] diff --git a/sdks/java/client/docs/ResourceClaim.md b/sdks/java/client/docs/ResourceClaim.md new file mode 100644 index 000000000000..60050a483e2b --- /dev/null +++ b/sdks/java/client/docs/ResourceClaim.md @@ -0,0 +1,14 @@ + + +# ResourceClaim + +ResourceClaim references one entry in PodSpec.ResourceClaims. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **String** | Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. | + + + diff --git a/sdks/java/client/docs/ResourceFieldSelector.md b/sdks/java/client/docs/ResourceFieldSelector.md index 675c47438f91..0ef949951421 100644 --- a/sdks/java/client/docs/ResourceFieldSelector.md +++ b/sdks/java/client/docs/ResourceFieldSelector.md @@ -9,7 +9,7 @@ ResourceFieldSelector represents container resources (cpu, memory) and their out Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **containerName** | **String** | Container name: required for volumes, optional for env vars | [optional] -**divisor** | **String** | Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. 
Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. | [optional] +**divisor** | **String** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. | [optional] **resource** | **String** | Required: resource to select | diff --git a/sdks/java/client/docs/ScaleIOVolumeSource.md b/sdks/java/client/docs/ScaleIOVolumeSource.md index 708d9a9ed83d..772a5fd4bb9b 100644 --- a/sdks/java/client/docs/ScaleIOVolumeSource.md +++ b/sdks/java/client/docs/ScaleIOVolumeSource.md @@ -8,16 +8,16 @@ ScaleIOVolumeSource represents a persistent ScaleIO volume Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\". | [optional] -**gateway** | **String** | The host address of the ScaleIO API Gateway. | -**protectionDomain** | **String** | The name of the ScaleIO Protection Domain for the configured storage. | [optional] -**readOnly** | **Boolean** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**fsType** | **String** | fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\". | [optional] +**gateway** | **String** | gateway is the host address of the ScaleIO API Gateway. | +**protectionDomain** | **String** | protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. | [optional] +**readOnly** | **Boolean** | readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | -**sslEnabled** | **Boolean** | Flag to enable/disable SSL communication with Gateway, default false | [optional] -**storageMode** | **String** | Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. | [optional] -**storagePool** | **String** | The ScaleIO Storage Pool associated with the protection domain. | [optional] -**system** | **String** | The name of the storage system as configured in ScaleIO. | -**volumeName** | **String** | The name of a volume already created in the ScaleIO system that is associated with this volume source. | [optional] +**sslEnabled** | **Boolean** | sslEnabled Flag enable/disable SSL communication with Gateway, default false | [optional] +**storageMode** | **String** | storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. | [optional] +**storagePool** | **String** | storagePool is the ScaleIO Storage Pool associated with the protection domain. | [optional] +**system** | **String** | system is the name of the storage system as configured in ScaleIO. | +**volumeName** | **String** | volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. 
| [optional] diff --git a/sdks/java/client/docs/SeccompProfile.md b/sdks/java/client/docs/SeccompProfile.md index 6683967edb2a..b6a69ee0f67b 100644 --- a/sdks/java/client/docs/SeccompProfile.md +++ b/sdks/java/client/docs/SeccompProfile.md @@ -9,17 +9,7 @@ SeccompProfile defines a pod/container's seccomp profile settings. Only one prof Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **localhostProfile** | **String** | localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\". | [optional] -**type** | [**TypeEnum**](#TypeEnum) | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Possible enum values: - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to <kubelet-root-dir>/seccomp. - `\"RuntimeDefault\"` represents the default container runtime seccomp profile. - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. unconfined). | - - - -## Enum: TypeEnum - -Name | Value ----- | ----- -LOCALHOST | "Localhost" -RUNTIMEDEFAULT | "RuntimeDefault" -UNCONFINED | "Unconfined" +**type** | **String** | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. 
| diff --git a/sdks/java/client/docs/SecretProjection.md b/sdks/java/client/docs/SecretProjection.md index f520d6ee9ae2..ebecb130166f 100644 --- a/sdks/java/client/docs/SecretProjection.md +++ b/sdks/java/client/docs/SecretProjection.md @@ -8,9 +8,9 @@ Adapts a secret into a projected volume. The contents of the target Secret's Da Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**items** | [**List<KeyToPath>**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**items** | [**List<KeyToPath>**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **String** | Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **Boolean** | Specify whether the Secret or its key must be defined | [optional] +**optional** | **Boolean** | optional field specify whether the Secret or its key must be defined | [optional] diff --git a/sdks/java/client/docs/SecretVolumeSource.md b/sdks/java/client/docs/SecretVolumeSource.md index 42d2b994aa63..577dc47890cd 100644 --- a/sdks/java/client/docs/SecretVolumeSource.md +++ b/sdks/java/client/docs/SecretVolumeSource.md @@ -8,10 +8,10 @@ Adapts a Secret into a volume. The contents of the target Secret's Data field w Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**defaultMode** | **Integer** | Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**items** | [**List<KeyToPath>**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] -**optional** | **Boolean** | Specify whether the Secret or its keys must be defined | [optional] -**secretName** | **String** | Name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | [optional] +**defaultMode** | **Integer** | defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**items** | [**List<KeyToPath>**](KeyToPath.md) | items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**optional** | **Boolean** | optional field specify whether the Secret or its keys must be defined | [optional] +**secretName** | **String** | secretName is the name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | [optional] diff --git a/sdks/java/client/docs/SensorServiceApi.md b/sdks/java/client/docs/SensorServiceApi.md index 6f39d645fee4..e8595cd4156d 100644 --- a/sdks/java/client/docs/SensorServiceApi.md +++ b/sdks/java/client/docs/SensorServiceApi.md @@ -239,7 +239,7 @@ Name | Type | Description | Notes # **sensorServiceListSensors** -> IoArgoprojEventsV1alpha1SensorList sensorServiceListSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojEventsV1alpha1SensorList sensorServiceListSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -275,8 +275,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - IoArgoprojEventsV1alpha1SensorList result = apiInstance.sensorServiceListSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojEventsV1alpha1SensorList result = apiInstance.sensorServiceListSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling SensorServiceApi#sensorServiceListSensors"); @@ -303,6 +304,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type @@ -491,7 +493,7 @@ Name | Type | Description | Notes # **sensorServiceWatchSensors** -> StreamResultOfSensorSensorWatchEvent sensorServiceWatchSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> StreamResultOfSensorSensorWatchEvent sensorServiceWatchSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -527,8 +529,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
+ Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - StreamResultOfSensorSensorWatchEvent result = apiInstance.sensorServiceWatchSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + StreamResultOfSensorSensorWatchEvent result = apiInstance.sensorServiceWatchSensors(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling SensorServiceApi#sensorServiceWatchSensors"); @@ -555,6 +558,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type diff --git a/sdks/java/client/docs/ServiceAccountTokenProjection.md b/sdks/java/client/docs/ServiceAccountTokenProjection.md index 3f3f58973c91..3a3ca95a70aa 100644 --- a/sdks/java/client/docs/ServiceAccountTokenProjection.md +++ b/sdks/java/client/docs/ServiceAccountTokenProjection.md @@ -8,9 +8,9 @@ ServiceAccountTokenProjection represents a projected service account token volum Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**audience** | **String** | Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | [optional] -**expirationSeconds** | **Integer** | ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. | [optional] -**path** | **String** | Path is the path relative to the mount point of the file to project the token into. | +**audience** | **String** | audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | [optional] +**expirationSeconds** | **Integer** | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. 
The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. | [optional] +**path** | **String** | path is the path relative to the mount point of the file to project the token into. | diff --git a/sdks/java/client/docs/ServicePort.md b/sdks/java/client/docs/ServicePort.md index b31853529c49..89d1726344a6 100644 --- a/sdks/java/client/docs/ServicePort.md +++ b/sdks/java/client/docs/ServicePort.md @@ -8,22 +8,12 @@ ServicePort contains information on service's port. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**appProtocol** | **String** | The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. | [optional] +**appProtocol** | **String** | The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. | [optional] **name** | **String** | The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. | [optional] **nodePort** | **Integer** | The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. 
If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport | [optional] **port** | **Integer** | The port that will be exposed by this service. | -**protocol** | [**ProtocolEnum**](#ProtocolEnum) | The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol. | [optional] +**protocol** | **String** | The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. | [optional] **targetPort** | **String** | | [optional] -## Enum: ProtocolEnum - -Name | Value ----- | ----- -SCTP | "SCTP" -TCP | "TCP" -UDP | "UDP" - - - diff --git a/sdks/java/client/docs/StorageOSVolumeSource.md b/sdks/java/client/docs/StorageOSVolumeSource.md index cc4cd6b4a0b3..29fb323435a1 100644 --- a/sdks/java/client/docs/StorageOSVolumeSource.md +++ b/sdks/java/client/docs/StorageOSVolumeSource.md @@ -8,11 +8,11 @@ Represents a StorageOS persistent volume resource. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**readOnly** | **Boolean** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**fsType** | **String** | fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**readOnly** | **Boolean** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **secretRef** | [**io.kubernetes.client.openapi.models.V1LocalObjectReference**](io.kubernetes.client.openapi.models.V1LocalObjectReference.md) | | [optional] -**volumeName** | **String** | VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. | [optional] -**volumeNamespace** | **String** | VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. | [optional] +**volumeName** | **String** | volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. | [optional] +**volumeNamespace** | **String** | volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. 
| [optional] diff --git a/sdks/java/client/docs/TypedObjectReference.md b/sdks/java/client/docs/TypedObjectReference.md new file mode 100644 index 000000000000..9230e47d5268 --- /dev/null +++ b/sdks/java/client/docs/TypedObjectReference.md @@ -0,0 +1,16 @@ + + +# TypedObjectReference + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**apiGroup** | **String** | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | [optional] +**kind** | **String** | Kind is the type of resource being referenced | +**name** | **String** | Name is the name of resource being referenced | +**namespace** | **String** | Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. | [optional] + + + diff --git a/sdks/java/client/docs/VsphereVirtualDiskVolumeSource.md b/sdks/java/client/docs/VsphereVirtualDiskVolumeSource.md index 6247cbc38f3b..7cd44645ce55 100644 --- a/sdks/java/client/docs/VsphereVirtualDiskVolumeSource.md +++ b/sdks/java/client/docs/VsphereVirtualDiskVolumeSource.md @@ -8,10 +8,10 @@ Represents a vSphere volume resource. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fsType** | **String** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**storagePolicyID** | **String** | Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. 
| [optional] -**storagePolicyName** | **String** | Storage Policy Based Management (SPBM) profile name. | [optional] -**volumePath** | **String** | Path that identifies vSphere volume vmdk | +**fsType** | **String** | fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**storagePolicyID** | **String** | storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. | [optional] +**storagePolicyName** | **String** | storagePolicyName is the storage Policy Based Management (SPBM) profile name. | [optional] +**volumePath** | **String** | volumePath is the path that identifies vSphere volume vmdk | diff --git a/sdks/java/client/docs/WorkflowServiceApi.md b/sdks/java/client/docs/WorkflowServiceApi.md index 009ccf2d78a8..ac9e100fe7bb 100644 --- a/sdks/java/client/docs/WorkflowServiceApi.md +++ b/sdks/java/client/docs/WorkflowServiceApi.md @@ -323,7 +323,7 @@ Name | Type | Description | Notes # **workflowServiceListWorkflows** -> IoArgoprojWorkflowV1alpha1WorkflowList workflowServiceListWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, fields) +> IoArgoprojWorkflowV1alpha1WorkflowList workflowServiceListWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, fields) @@ -359,9 +359,10 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional String fields = "fields_example"; // String | Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\". try { - IoArgoprojWorkflowV1alpha1WorkflowList result = apiInstance.workflowServiceListWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, fields); + IoArgoprojWorkflowV1alpha1WorkflowList result = apiInstance.workflowServiceListWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, fields); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling WorkflowServiceApi#workflowServiceListWorkflows"); @@ -388,6 +389,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] **fields** | **String**| Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\". | [optional] ### Return type @@ -1081,7 +1083,7 @@ Name | Type | Description | Notes # **workflowServiceWatchEvents** -> StreamResultOfEvent workflowServiceWatchEvents(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> StreamResultOfEvent workflowServiceWatchEvents(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -1117,8 +1119,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional try { - StreamResultOfEvent result = apiInstance.workflowServiceWatchEvents(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + StreamResultOfEvent result = apiInstance.workflowServiceWatchEvents(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling WorkflowServiceApi#workflowServiceWatchEvents"); @@ -1145,6 +1148,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type @@ -1167,7 +1171,7 @@ Name | Type | Description | Notes # **workflowServiceWatchWorkflows** -> StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent workflowServiceWatchWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, fields) +> StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent workflowServiceWatchWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, fields) @@ -1203,9 +1207,10 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
+ Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional String fields = "fields_example"; // String | try { - StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent result = apiInstance.workflowServiceWatchWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, fields); + StreamResultOfIoArgoprojWorkflowV1alpha1WorkflowWatchEvent result = apiInstance.workflowServiceWatchWorkflows(namespace, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents, fields); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling WorkflowServiceApi#workflowServiceWatchWorkflows"); @@ -1232,6 +1237,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
| [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] **fields** | **String**| | [optional] ### Return type diff --git a/sdks/java/client/docs/WorkflowTemplateServiceApi.md b/sdks/java/client/docs/WorkflowTemplateServiceApi.md index e9b1d9611bed..27861298b4c9 100644 --- a/sdks/java/client/docs/WorkflowTemplateServiceApi.md +++ b/sdks/java/client/docs/WorkflowTemplateServiceApi.md @@ -308,7 +308,7 @@ Name | Type | Description | Notes # **workflowTemplateServiceListWorkflowTemplates** -> IoArgoprojWorkflowV1alpha1WorkflowTemplateList workflowTemplateServiceListWorkflowTemplates(namespace, namePattern, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue) +> IoArgoprojWorkflowV1alpha1WorkflowTemplateList workflowTemplateServiceListWorkflowTemplates(namespace, namePattern, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents) @@ -345,8 +345,9 @@ public class Example { String listOptionsTimeoutSeconds = "listOptionsTimeoutSeconds_example"; // String | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. String listOptionsLimit = "listOptionsLimit_example"; // String | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. String listOptionsContinue = "listOptionsContinue_example"; // String | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. + Boolean listOptionsSendInitialEvents = true; // Boolean | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional try { - IoArgoprojWorkflowV1alpha1WorkflowTemplateList result = apiInstance.workflowTemplateServiceListWorkflowTemplates(namespace, namePattern, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue); + IoArgoprojWorkflowV1alpha1WorkflowTemplateList result = apiInstance.workflowTemplateServiceListWorkflowTemplates(namespace, namePattern, listOptionsLabelSelector, listOptionsFieldSelector, listOptionsWatch, listOptionsAllowWatchBookmarks, listOptionsResourceVersion, listOptionsResourceVersionMatch, listOptionsTimeoutSeconds, listOptionsLimit, listOptionsContinue, listOptionsSendInitialEvents); System.out.println(result); } catch (ApiException e) { System.err.println("Exception when calling WorkflowTemplateServiceApi#workflowTemplateServiceListWorkflowTemplates"); @@ -374,6 +375,7 @@ Name | Type | Description | Notes **listOptionsTimeoutSeconds** | **String**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **listOptionsLimit** | **String**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **listOptionsContinue** | **String**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **listOptionsSendInitialEvents** | **Boolean**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional | [optional] ### Return type diff --git a/sdks/python/client/argo_workflows/api/archived_workflow_service_api.py b/sdks/python/client/argo_workflows/api/archived_workflow_service_api.py index 7f63506edef9..015dcdb27127 100644 --- a/sdks/python/client/argo_workflows/api/archived_workflow_service_api.py +++ b/sdks/python/client/argo_workflows/api/archived_workflow_service_api.py @@ -229,6 +229,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', 'namespace', ], 'required': [], @@ -263,6 +264,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), 'namespace': (str,), }, @@ -276,6 +279,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', 'namespace': 'namespace', }, 'location_map': { @@ -288,6 +292,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', 'namespace': 'query', }, 'collection_format_map': { @@ -323,6 +328,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', 'name_prefix', 'namespace', ], @@ -358,6 +364,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), 'name_prefix': (str,), 'namespace': @@ -373,6 +381,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 
'listOptions.sendInitialEvents', 'name_prefix': 'namePrefix', 'namespace': 'namespace', }, @@ -386,6 +395,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', 'name_prefix': 'query', 'namespace': 'query', }, @@ -770,6 +780,7 @@ def list_archived_workflow_label_values( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. 
[optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] namespace (str): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. @@ -852,6 +863,7 @@ def list_archived_workflows( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. 
[optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. 
[optional] name_prefix (str): [optional] namespace (str): [optional] _return_http_data_only (bool): response data without head status diff --git a/sdks/python/client/argo_workflows/api/cluster_workflow_template_service_api.py b/sdks/python/client/argo_workflows/api/cluster_workflow_template_service_api.py index 5053a8f21aa3..b83a630b9885 100644 --- a/sdks/python/client/argo_workflows/api/cluster_workflow_template_service_api.py +++ b/sdks/python/client/argo_workflows/api/cluster_workflow_template_service_api.py @@ -304,6 +304,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [], 'nullable': [ @@ -337,6 +338,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'list_options_label_selector': 'listOptions.labelSelector', @@ -348,6 +351,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'list_options_label_selector': 'query', @@ -359,6 +363,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -768,6 +773,7 @@ def list_cluster_workflow_templates( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/api/cron_workflow_service_api.py b/sdks/python/client/argo_workflows/api/cron_workflow_service_api.py index e510d7300a69..0a924a8691ba 100644 --- a/sdks/python/client/argo_workflows/api/cron_workflow_service_api.py +++ b/sdks/python/client/argo_workflows/api/cron_workflow_service_api.py @@ -331,6 +331,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -368,6 +369,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -380,6 +383,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -392,6 +396,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -954,6 +959,7 @@ def list_cron_workflows( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/api/event_service_api.py b/sdks/python/client/argo_workflows/api/event_service_api.py index 3c893df3ccb8..1c099bd82fb9 100644 --- a/sdks/python/client/argo_workflows/api/event_service_api.py +++ b/sdks/python/client/argo_workflows/api/event_service_api.py @@ -59,6 +59,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -96,6 +97,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -108,6 +111,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -120,6 +124,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -223,6 +228,7 @@ def list_workflow_event_bindings( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/api/event_source_service_api.py b/sdks/python/client/argo_workflows/api/event_source_service_api.py index 874d90c19198..5fa6a9c2f1d7 100644 --- a/sdks/python/client/argo_workflows/api/event_source_service_api.py +++ b/sdks/python/client/argo_workflows/api/event_source_service_api.py @@ -388,6 +388,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -425,6 +426,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -437,6 +440,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -449,6 +453,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -548,6 +553,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -585,6 +591,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 
'namespace': 'namespace', @@ -597,6 +605,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -609,6 +618,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -988,6 +998,7 @@ def list_event_sources( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -1159,6 +1170,7 @@ def watch_event_sources( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/api/sensor_service_api.py b/sdks/python/client/argo_workflows/api/sensor_service_api.py index c8e7d89c762b..429d31047626 100644 --- a/sdks/python/client/argo_workflows/api/sensor_service_api.py +++ b/sdks/python/client/argo_workflows/api/sensor_service_api.py @@ -272,6 +272,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -309,6 +310,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -321,6 +324,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -333,6 +337,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -548,6 +553,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -585,6 +591,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ 
-597,6 +605,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -609,6 +618,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -898,6 +908,7 @@ def list_sensors( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -1159,6 +1170,7 @@ def watch_sensors( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/api/workflow_service_api.py b/sdks/python/client/argo_workflows/api/workflow_service_api.py index 838e3545d1b9..a65cb7feded5 100644 --- a/sdks/python/client/argo_workflows/api/workflow_service_api.py +++ b/sdks/python/client/argo_workflows/api/workflow_service_api.py @@ -349,6 +349,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', 'fields', ], 'required': [ @@ -387,6 +388,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), 'fields': (str,), }, @@ -401,6 +404,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', 'fields': 'fields', }, 'location_map': { @@ -414,6 +418,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', 'fields': 'query', }, 'collection_format_map': { @@ -1079,6 +1084,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -1116,6 +1122,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -1128,6 
+1136,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -1140,6 +1149,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -1175,6 +1185,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', 'fields', ], 'required': [ @@ -1213,6 +1224,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), 'fields': (str,), }, @@ -1227,6 +1240,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', 'fields': 'fields', }, 'location_map': { @@ -1240,6 +1254,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', 'fields': 'query', }, 'collection_format_map': { @@ -1735,6 +1750,7 @@ def list_workflows( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] fields (str): Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\".. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. @@ -2595,6 +2611,7 @@ def watch_events( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. 
[optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -2681,6 +2698,7 @@ def watch_workflows( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. 
[optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] fields (str): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. 
diff --git a/sdks/python/client/argo_workflows/api/workflow_template_service_api.py b/sdks/python/client/argo_workflows/api/workflow_template_service_api.py index 3b01cc57210a..f169a03bded6 100644 --- a/sdks/python/client/argo_workflows/api/workflow_template_service_api.py +++ b/sdks/python/client/argo_workflows/api/workflow_template_service_api.py @@ -330,6 +330,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds', 'list_options_limit', 'list_options_continue', + 'list_options_send_initial_events', ], 'required': [ 'namespace', @@ -369,6 +370,8 @@ def __init__(self, api_client=None): (str,), 'list_options_continue': (str,), + 'list_options_send_initial_events': + (bool,), }, 'attribute_map': { 'namespace': 'namespace', @@ -382,6 +385,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'listOptions.timeoutSeconds', 'list_options_limit': 'listOptions.limit', 'list_options_continue': 'listOptions.continue', + 'list_options_send_initial_events': 'listOptions.sendInitialEvents', }, 'location_map': { 'namespace': 'path', @@ -395,6 +399,7 @@ def __init__(self, api_client=None): 'list_options_timeout_seconds': 'query', 'list_options_limit': 'query', 'list_options_continue': 'query', + 'list_options_send_initial_events': 'query', }, 'collection_format_map': { } @@ -830,6 +835,7 @@ def list_workflow_templates( list_options_timeout_seconds (str): Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional.. [optional] list_options_limit (str): limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.. [optional] list_options_continue (str): The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.. [optional] + list_options_send_initial_events (bool): `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object diff --git a/sdks/python/client/argo_workflows/model/aws_elastic_block_store_volume_source.py b/sdks/python/client/argo_workflows/model/aws_elastic_block_store_volume_source.py index 2e920aa7dcd5..e7ddf91b2f02 100644 --- a/sdks/python/client/argo_workflows/model/aws_elastic_block_store_volume_source.py +++ b/sdks/python/client/argo_workflows/model/aws_elastic_block_store_volume_source.py @@ -110,7 +110,7 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 """AWSElasticBlockStoreVolumeSource - a model defined in OpenAPI Args: - volume_id (str): Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + volume_id (str): volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -143,9 +143,9 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 - partition (int): The partition in the volume that you want to mount. 
If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).. [optional] # noqa: E501 - read_only (bool): Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 + partition (int): partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).. [optional] # noqa: E501 + read_only (bool): readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -198,7 +198,7 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 """AWSElasticBlockStoreVolumeSource - a model defined in OpenAPI Args: - volume_id (str): Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + volume_id (str): volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -231,9 +231,9 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 - partition (int): The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).. [optional] # noqa: E501 - read_only (bool): Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 + partition (int): partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).. 
[optional] # noqa: E501 + read_only (bool): readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/azure_disk_volume_source.py b/sdks/python/client/argo_workflows/model/azure_disk_volume_source.py index 17ee5b9f0c8e..2d757f74dfe7 100644 --- a/sdks/python/client/argo_workflows/model/azure_disk_volume_source.py +++ b/sdks/python/client/argo_workflows/model/azure_disk_volume_source.py @@ -114,8 +114,8 @@ def _from_openapi_data(cls, disk_name, disk_uri, *args, **kwargs): # noqa: E501 """AzureDiskVolumeSource - a model defined in OpenAPI Args: - disk_name (str): The Name of the data disk in the blob storage - disk_uri (str): The URI the data disk in the blob storage + disk_name (str): diskName is the Name of the data disk in the blob storage + disk_uri (str): diskURI is the URI of data disk in the blob storage Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -148,10 +148,10 @@ def _from_openapi_data(cls, disk_name, disk_uri, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - caching_mode (str): Host Caching mode: None, Read Only, Read Write.. [optional] # noqa: E501 - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - kind (str): Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + caching_mode (str): cachingMode is the Host Caching mode: None, Read Only, Read Write.. [optional] # noqa: E501 + fs_type (str): fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + kind (str): kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared. [optional] # noqa: E501 + read_only (bool): readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -205,8 +205,8 @@ def __init__(self, disk_name, disk_uri, *args, **kwargs): # noqa: E501 """AzureDiskVolumeSource - a model defined in OpenAPI Args: - disk_name (str): The Name of the data disk in the blob storage - disk_uri (str): The URI the data disk in the blob storage + disk_name (str): diskName is the Name of the data disk in the blob storage + disk_uri (str): diskURI is the URI of data disk in the blob storage Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -239,10 +239,10 @@ def __init__(self, disk_name, disk_uri, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - caching_mode (str): Host Caching mode: None, Read Only, Read Write.. [optional] # noqa: E501 - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. 
[optional] # noqa: E501 - kind (str): Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + caching_mode (str): cachingMode is the Host Caching mode: None, Read Only, Read Write.. [optional] # noqa: E501 + fs_type (str): fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + kind (str): kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared. [optional] # noqa: E501 + read_only (bool): readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/azure_file_volume_source.py b/sdks/python/client/argo_workflows/model/azure_file_volume_source.py index 506429512cb4..a2b51193caf6 100644 --- a/sdks/python/client/argo_workflows/model/azure_file_volume_source.py +++ b/sdks/python/client/argo_workflows/model/azure_file_volume_source.py @@ -108,8 +108,8 @@ def _from_openapi_data(cls, secret_name, share_name, *args, **kwargs): # noqa: """AzureFileVolumeSource - a model defined in OpenAPI Args: - secret_name (str): the name of secret that contains Azure Storage Account Name and Key - share_name (str): Share Name + secret_name (str): secretName is the name of secret that contains Azure Storage Account Name and Key + share_name (str): shareName is the azure share Name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -142,7 +142,7 @@ def _from_openapi_data(cls, secret_name, share_name, *args, **kwargs): # noqa: Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -196,8 +196,8 @@ def __init__(self, secret_name, share_name, *args, **kwargs): # noqa: E501 """AzureFileVolumeSource - a model defined in OpenAPI Args: - secret_name (str): the name of secret that contains Azure Storage Account Name and Key - share_name (str): Share Name + secret_name (str): secretName is the name of secret that contains Azure Storage Account Name and Key + share_name (str): shareName is the azure share Name Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -230,7 +230,7 @@ def __init__(self, secret_name, share_name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/ceph_fs_volume_source.py b/sdks/python/client/argo_workflows/model/ceph_fs_volume_source.py index 9c998a222022..1ac3c7ea0db1 100644 --- a/sdks/python/client/argo_workflows/model/ceph_fs_volume_source.py +++ b/sdks/python/client/argo_workflows/model/ceph_fs_volume_source.py @@ -120,7 +120,7 @@ def _from_openapi_data(cls, monitors, *args, **kwargs): # noqa: E501 """CephFSVolumeSource - a model defined in OpenAPI Args: - monitors ([str]): Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + monitors ([str]): monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -153,11 +153,11 @@ def _from_openapi_data(cls, monitors, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - path (str): Optional: Used as the mounted root, rather than the full Ceph tree, default is /. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 - secret_file (str): Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + path (str): path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /. [optional] # noqa: E501 + read_only (bool): readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + secret_file (str): secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - user (str): Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + user (str): user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -210,7 +210,7 @@ def __init__(self, monitors, *args, **kwargs): # noqa: E501 """CephFSVolumeSource - a model defined in OpenAPI Args: - monitors ([str]): Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + monitors ([str]): monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -243,11 +243,11 @@ def __init__(self, monitors, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - path (str): Optional: Used as the mounted root, rather than the full Ceph tree, default is /. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. 
[optional] # noqa: E501 - secret_file (str): Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + path (str): path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /. [optional] # noqa: E501 + read_only (bool): readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + secret_file (str): secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - user (str): Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 + user (str): user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/cinder_volume_source.py b/sdks/python/client/argo_workflows/model/cinder_volume_source.py index fec29d096506..7730c4b7d05f 100644 --- a/sdks/python/client/argo_workflows/model/cinder_volume_source.py +++ b/sdks/python/client/argo_workflows/model/cinder_volume_source.py @@ -116,7 +116,7 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 """CinderVolumeSource - a model defined in OpenAPI Args: - volume_id (str): volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + volume_id (str): volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -149,8 +149,8 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ @@ -204,7 +204,7 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 """CinderVolumeSource - a model defined in OpenAPI Args: - volume_id (str): volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md + volume_id (str): volumeID used to identify the volume in cinder. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -237,8 +237,8 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md. 
[optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/config_map_projection.py b/sdks/python/client/argo_workflows/model/config_map_projection.py index c806a2c2f636..713bd3800d4a 100644 --- a/sdks/python/client/argo_workflows/model/config_map_projection.py +++ b/sdks/python/client/argo_workflows/model/config_map_projection.py @@ -144,9 +144,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the ConfigMap or its keys must be defined. 
[optional] # noqa: E501 + optional (bool): optional specify whether the ConfigMap or its keys must be defined. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -228,9 +228,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the ConfigMap or its keys must be defined. [optional] # noqa: E501 + optional (bool): optional specify whether the ConfigMap or its keys must be defined. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/config_map_volume_source.py b/sdks/python/client/argo_workflows/model/config_map_volume_source.py index 1cbd2ecced62..1a9218d505aa 100644 --- a/sdks/python/client/argo_workflows/model/config_map_volume_source.py +++ b/sdks/python/client/argo_workflows/model/config_map_volume_source.py @@ -146,10 +146,10 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + default_mode (int): defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the ConfigMap or its keys must be defined. [optional] # noqa: E501 + optional (bool): optional specify whether the ConfigMap or its keys must be defined. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -231,10 +231,10 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. 
[optional] # noqa: E501 - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + default_mode (int): defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the ConfigMap or its keys must be defined. [optional] # noqa: E501 + optional (bool): optional specify whether the ConfigMap or its keys must be defined. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/container.py b/sdks/python/client/argo_workflows/model/container.py index 228ff2fabb8a..d4574eb9f2fb 100644 --- a/sdks/python/client/argo_workflows/model/container.py +++ b/sdks/python/client/argo_workflows/model/container.py @@ -31,6 +31,7 @@ def lazy_import(): from argo_workflows.model.container_port import ContainerPort + from argo_workflows.model.container_resize_policy import ContainerResizePolicy from argo_workflows.model.env_from_source import EnvFromSource from argo_workflows.model.env_var import EnvVar from argo_workflows.model.lifecycle import Lifecycle @@ -40,6 +41,7 @@ def lazy_import(): from argo_workflows.model.volume_device import VolumeDevice from argo_workflows.model.volume_mount import VolumeMount globals()['ContainerPort'] = ContainerPort + globals()['ContainerResizePolicy'] = ContainerResizePolicy globals()['EnvFromSource'] = EnvFromSource globals()['EnvVar'] = EnvVar globals()['Lifecycle'] = Lifecycle @@ -75,15 +77,6 @@ class Container(ModelNormal): """ allowed_values = { - ('image_pull_policy',): { - 'ALWAYS': "Always", - 'IFNOTPRESENT': "IfNotPresent", - 'NEVER': "Never", - }, - ('termination_message_policy',): { - 'FALLBACKTOLOGSONERROR': "FallbackToLogsOnError", - 'FILE': "File", - }, } validations = { @@ -123,6 +116,7 @@ def openapi_types(): 'name': (str,), # noqa: E501 'ports': ([ContainerPort],), # noqa: E501 'readiness_probe': (Probe,), # noqa: E501 + 'resize_policy': ([ContainerResizePolicy],), # noqa: E501 'resources': (ResourceRequirements,), # noqa: E501 'security_context': (SecurityContext,), # noqa: E501 'startup_probe': (Probe,), # noqa: E501 @@ -153,6 +147,7 @@ def discriminator(): 'name': 'name', # noqa: E501 'ports': 'ports', # noqa: E501 'readiness_probe': 'readinessProbe', # noqa: E501 + 'resize_policy': 'resizePolicy', # noqa: E501 'resources': 'resources', # noqa: E501 'security_context': 
'securityContext', # noqa: E501 'startup_probe': 'startupProbe', # noqa: E501 @@ -177,7 +172,7 @@ def _from_openapi_data(cls, image, *args, **kwargs): # noqa: E501 """Container - a model defined in OpenAPI Args: - image (str): Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + image (str): Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -210,23 +205,24 @@ def _from_openapi_data(cls, image, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - args ([str]): Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 - command ([str]): Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 + args ([str]): Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 + command ([str]): Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. 
[optional] # noqa: E501 env ([EnvVar]): List of environment variables to set in the container. Cannot be updated.. [optional] # noqa: E501 env_from ([EnvFromSource]): List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.. [optional] # noqa: E501 - image_pull_policy (str): Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present. [optional] # noqa: E501 + image_pull_policy (str): Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images. [optional] # noqa: E501 lifecycle (Lifecycle): [optional] # noqa: E501 liveness_probe (Probe): [optional] # noqa: E501 name (str): Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.. [optional] # noqa: E501 - ports ([ContainerPort]): List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.. [optional] # noqa: E501 + ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 stdin (bool): Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.. [optional] # noqa: E501 stdin_once (bool): Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false. 
[optional] # noqa: E501 termination_message_path (str): Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.. [optional] # noqa: E501 - termination_message_policy (str): Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.. [optional] # noqa: E501 + termination_message_policy (str): Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.. [optional] # noqa: E501 tty (bool): Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.. [optional] # noqa: E501 volume_devices ([VolumeDevice]): volumeDevices is the list of block devices to be used by the container.. [optional] # noqa: E501 volume_mounts ([VolumeMount]): Pod volumes to mount into the container's filesystem. Cannot be updated.. [optional] # noqa: E501 @@ -283,7 +279,7 @@ def __init__(self, image, *args, **kwargs): # noqa: E501 """Container - a model defined in OpenAPI Args: - image (str): Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. + image (str): Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -316,23 +312,24 @@ def __init__(self, image, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - args ([str]): Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 - command ([str]): Entrypoint array. 
Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 + args ([str]): Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 + command ([str]): Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell. [optional] # noqa: E501 env ([EnvVar]): List of environment variables to set in the container. Cannot be updated.. [optional] # noqa: E501 env_from ([EnvFromSource]): List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.. [optional] # noqa: E501 - image_pull_policy (str): Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present. [optional] # noqa: E501 + image_pull_policy (str): Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images. [optional] # noqa: E501 lifecycle (Lifecycle): [optional] # noqa: E501 liveness_probe (Probe): [optional] # noqa: E501 name (str): Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.. 
[optional] # noqa: E501 - ports ([ContainerPort]): List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.. [optional] # noqa: E501 + ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 stdin (bool): Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.. [optional] # noqa: E501 stdin_once (bool): Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. 
If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false. [optional] # noqa: E501 termination_message_path (str): Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.. [optional] # noqa: E501 - termination_message_policy (str): Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. - `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits.. [optional] # noqa: E501 + termination_message_policy (str): Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.. 
[optional] # noqa: E501 tty (bool): Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.. [optional] # noqa: E501 volume_devices ([VolumeDevice]): volumeDevices is the list of block devices to be used by the container.. [optional] # noqa: E501 volume_mounts ([VolumeMount]): Pod volumes to mount into the container's filesystem. Cannot be updated.. [optional] # noqa: E501 diff --git a/sdks/python/client/argo_workflows/model/container_port.py b/sdks/python/client/argo_workflows/model/container_port.py index 15c182a4735c..ca2eb2373fa2 100644 --- a/sdks/python/client/argo_workflows/model/container_port.py +++ b/sdks/python/client/argo_workflows/model/container_port.py @@ -55,11 +55,6 @@ class ContainerPort(ModelNormal): """ allowed_values = { - ('protocol',): { - 'SCTP': "SCTP", - 'TCP': "TCP", - 'UDP': "UDP", - }, } validations = { @@ -153,7 +148,7 @@ def _from_openapi_data(cls, container_port, *args, **kwargs): # noqa: E501 host_ip (str): What host IP to bind the external port to.. [optional] # noqa: E501 host_port (int): Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.. [optional] # noqa: E501 name (str): If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.. [optional] # noqa: E501 - protocol (str): Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\". Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol.. [optional] # noqa: E501 + protocol (str): Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -242,7 +237,7 @@ def __init__(self, container_port, *args, **kwargs): # noqa: E501 host_ip (str): What host IP to bind the external port to.. [optional] # noqa: E501 host_port (int): Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.. [optional] # noqa: E501 name (str): If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.. [optional] # noqa: E501 - protocol (str): Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\". Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol.. [optional] # noqa: E501 + protocol (str): Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/container_resize_policy.py b/sdks/python/client/argo_workflows/model/container_resize_policy.py new file mode 100644 index 000000000000..13442708c88b --- /dev/null +++ b/sdks/python/client/argo_workflows/model/container_resize_policy.py @@ -0,0 +1,267 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. 
For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + + +class ContainerResizePolicy(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'resource_name': (str,), # noqa: E501 + 'restart_policy': (str,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'resource_name': 'resourceName', # noqa: E501 + 'restart_policy': 'restartPolicy', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, resource_name, restart_policy, *args, **kwargs): # noqa: E501 + """ContainerResizePolicy - a model defined in OpenAPI + + Args: + resource_name (str): Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + restart_policy (str): Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. 
snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.resource_name = resource_name + self.restart_policy = restart_policy + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, resource_name, restart_policy, *args, **kwargs): # noqa: E501 + """ContainerResizePolicy - a model defined in OpenAPI + + Args: + resource_name (str): Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. + restart_policy (str): Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. 
snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.resource_name = resource_name + self.restart_policy = restart_policy + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/model/csi_volume_source.py b/sdks/python/client/argo_workflows/model/csi_volume_source.py index cfa528787e97..0dbb5a1153c5 100644 --- a/sdks/python/client/argo_workflows/model/csi_volume_source.py +++ b/sdks/python/client/argo_workflows/model/csi_volume_source.py @@ -118,7 +118,7 @@ def _from_openapi_data(cls, driver, *args, **kwargs): # noqa: E501 """CSIVolumeSource - a model defined in OpenAPI Args: - driver (str): Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + driver (str): driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -151,10 +151,10 @@ def _from_openapi_data(cls, driver, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.. [optional] # noqa: E501 + fs_type (str): fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.. [optional] # noqa: E501 node_publish_secret_ref (LocalObjectReference): [optional] # noqa: E501 - read_only (bool): Specifies a read-only configuration for the volume. Defaults to false (read/write).. [optional] # noqa: E501 - volume_attributes ({str: (str,)}): VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.. [optional] # noqa: E501 + read_only (bool): readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).. [optional] # noqa: E501 + volume_attributes ({str: (str,)}): volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -207,7 +207,7 @@ def __init__(self, driver, *args, **kwargs): # noqa: E501 """CSIVolumeSource - a model defined in OpenAPI Args: - driver (str): Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + driver (str): driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -240,10 +240,10 @@ def __init__(self, driver, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.. [optional] # noqa: E501 + fs_type (str): fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.. [optional] # noqa: E501 node_publish_secret_ref (LocalObjectReference): [optional] # noqa: E501 - read_only (bool): Specifies a read-only configuration for the volume. Defaults to false (read/write).. [optional] # noqa: E501 - volume_attributes ({str: (str,)}): VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.. [optional] # noqa: E501 + read_only (bool): readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).. [optional] # noqa: E501 + volume_attributes ({str: (str,)}): volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/empty_dir_volume_source.py b/sdks/python/client/argo_workflows/model/empty_dir_volume_source.py index e8ccff6a2235..0552646f0ed5 100644 --- a/sdks/python/client/argo_workflows/model/empty_dir_volume_source.py +++ b/sdks/python/client/argo_workflows/model/empty_dir_volume_source.py @@ -136,8 +136,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - medium (str): What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir. [optional] # noqa: E501 - size_limit (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. 
Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 + medium (str): medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir. [optional] # noqa: E501 + size_limit (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) 
::= \"e\" | \"E\" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -219,8 +219,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - medium (str): What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir. 
[optional] # noqa: E501 - size_limit (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) 
This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 + medium (str): medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir. [optional] # noqa: E501 + size_limit (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. 
Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/fc_volume_source.py b/sdks/python/client/argo_workflows/model/fc_volume_source.py index 1c87fc425e23..2460c6b086d5 100644 --- a/sdks/python/client/argo_workflows/model/fc_volume_source.py +++ b/sdks/python/client/argo_workflows/model/fc_volume_source.py @@ -142,11 +142,11 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - lun (int): Optional: FC target lun number. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 - target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 - wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + lun (int): lun is Optional: FC target lun number. [optional] # noqa: E501 + read_only (bool): readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + target_wwns ([str]): targetWWNs is Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 + wwids ([str]): wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -228,11 +228,11 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - lun (int): Optional: FC target lun number. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 - target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 - wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + lun (int): lun is Optional: FC target lun number. 
[optional] # noqa: E501 + read_only (bool): readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + target_wwns ([str]): targetWWNs is Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 + wwids ([str]): wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/flex_volume_source.py b/sdks/python/client/argo_workflows/model/flex_volume_source.py index 910e2cb85a23..bf593a370c36 100644 --- a/sdks/python/client/argo_workflows/model/flex_volume_source.py +++ b/sdks/python/client/argo_workflows/model/flex_volume_source.py @@ -118,7 +118,7 @@ def _from_openapi_data(cls, driver, *args, **kwargs): # noqa: E501 """FlexVolumeSource - a model defined in OpenAPI Args: - driver (str): Driver is the name of the driver to use for this volume. + driver (str): driver is the name of the driver to use for this volume. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -151,9 +151,9 @@ def _from_openapi_data(cls, driver, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.. [optional] # noqa: E501 - options ({str: (str,)}): Optional: Extra command options if any.. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.. [optional] # noqa: E501 + options ({str: (str,)}): options is Optional: this field holds extra command options if any.. [optional] # noqa: E501 + read_only (bool): readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ @@ -207,7 +207,7 @@ def __init__(self, driver, *args, **kwargs): # noqa: E501 """FlexVolumeSource - a model defined in OpenAPI Args: - driver (str): Driver is the name of the driver to use for this volume. + driver (str): driver is the name of the driver to use for this volume. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -240,9 +240,9 @@ def __init__(self, driver, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.. [optional] # noqa: E501 - options ({str: (str,)}): Optional: Extra command options if any.. [optional] # noqa: E501 - read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.. [optional] # noqa: E501 + options ({str: (str,)}): options is Optional: this field holds extra command options if any.. [optional] # noqa: E501 + read_only (bool): readOnly is Optional: defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/flocker_volume_source.py b/sdks/python/client/argo_workflows/model/flocker_volume_source.py index a362f844f36e..768edc753698 100644 --- a/sdks/python/client/argo_workflows/model/flocker_volume_source.py +++ b/sdks/python/client/argo_workflows/model/flocker_volume_source.py @@ -136,8 +136,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - dataset_name (str): Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated. [optional] # noqa: E501 - dataset_uuid (str): UUID of the dataset. This is unique identifier of a Flocker dataset. [optional] # noqa: E501 + dataset_name (str): datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated. [optional] # noqa: E501 + dataset_uuid (str): datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -219,8 +219,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - dataset_name (str): Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated. [optional] # noqa: E501 - dataset_uuid (str): UUID of the dataset. This is unique identifier of a Flocker dataset. [optional] # noqa: E501 + dataset_name (str): datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated. 
[optional] # noqa: E501 + dataset_uuid (str): datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/gce_persistent_disk_volume_source.py b/sdks/python/client/argo_workflows/model/gce_persistent_disk_volume_source.py index 46f9a5b1d50d..5c4d824f3398 100644 --- a/sdks/python/client/argo_workflows/model/gce_persistent_disk_volume_source.py +++ b/sdks/python/client/argo_workflows/model/gce_persistent_disk_volume_source.py @@ -110,7 +110,7 @@ def _from_openapi_data(cls, pd_name, *args, **kwargs): # noqa: E501 """GCEPersistentDiskVolumeSource - a model defined in OpenAPI Args: - pd_name (str): Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + pd_name (str): pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -143,9 +143,9 @@ def _from_openapi_data(cls, pd_name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 - partition (int): The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + fs_type (str): fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + partition (int): partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -198,7 +198,7 @@ def __init__(self, pd_name, *args, **kwargs): # noqa: E501 """GCEPersistentDiskVolumeSource - a model defined in OpenAPI Args: - pd_name (str): Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + pd_name (str): pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -231,9 +231,9 @@ def __init__(self, pd_name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 - partition (int): The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + fs_type (str): fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + partition (int): partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/git_repo_volume_source.py b/sdks/python/client/argo_workflows/model/git_repo_volume_source.py index ad898421a0fc..ef1983cebee0 100644 --- a/sdks/python/client/argo_workflows/model/git_repo_volume_source.py +++ b/sdks/python/client/argo_workflows/model/git_repo_volume_source.py @@ -108,7 +108,7 @@ def _from_openapi_data(cls, repository, *args, **kwargs): # noqa: E501 """GitRepoVolumeSource - a model defined in OpenAPI Args: - repository (str): Repository URL + repository (str): repository is the URL Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -141,8 +141,8 @@ def _from_openapi_data(cls, repository, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - directory (str): Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.. [optional] # noqa: E501 - revision (str): Commit hash for the specified revision.. [optional] # noqa: E501 + directory (str): directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.. [optional] # noqa: E501 + revision (str): revision is the commit hash for the specified revision.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -195,7 +195,7 @@ def __init__(self, repository, *args, **kwargs): # noqa: E501 """GitRepoVolumeSource - a model defined in OpenAPI Args: - repository (str): Repository URL + repository (str): repository is the URL Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -228,8 +228,8 @@ def __init__(self, repository, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - directory (str): Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.. [optional] # noqa: E501 - revision (str): Commit hash for the specified revision.. [optional] # noqa: E501 + directory (str): directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.. [optional] # noqa: E501 + revision (str): revision is the commit hash for the specified revision.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/glusterfs_volume_source.py b/sdks/python/client/argo_workflows/model/glusterfs_volume_source.py index 6d39843cc0b1..1491e33cb763 100644 --- a/sdks/python/client/argo_workflows/model/glusterfs_volume_source.py +++ b/sdks/python/client/argo_workflows/model/glusterfs_volume_source.py @@ -108,8 +108,8 @@ def _from_openapi_data(cls, endpoints, path, *args, **kwargs): # noqa: E501 """GlusterfsVolumeSource - a model defined in OpenAPI Args: - endpoints (str): EndpointsName is the endpoint name that details Glusterfs topology. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - path (str): Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + endpoints (str): endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + path (str): path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -142,7 +142,7 @@ def _from_openapi_data(cls, endpoints, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod. [optional] # noqa: E501 + read_only (bool): readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -196,8 +196,8 @@ def __init__(self, endpoints, path, *args, **kwargs): # noqa: E501 """GlusterfsVolumeSource - a model defined in OpenAPI Args: - endpoints (str): EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - path (str): Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + endpoints (str): endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + path (str): path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -230,7 +230,7 @@ def __init__(self, endpoints, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod. [optional] # noqa: E501 + read_only (bool): readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/host_path_volume_source.py b/sdks/python/client/argo_workflows/model/host_path_volume_source.py index 6f9191ddf18c..88cd8206f47e 100644 --- a/sdks/python/client/argo_workflows/model/host_path_volume_source.py +++ b/sdks/python/client/argo_workflows/model/host_path_volume_source.py @@ -106,7 +106,7 @@ def _from_openapi_data(cls, path, *args, **kwargs): # noqa: E501 """HostPathVolumeSource - a model defined in OpenAPI Args: - path (str): Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + path (str): path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -139,7 +139,7 @@ def _from_openapi_data(cls, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - type (str): Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath. [optional] # noqa: E501 + type (str): type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -192,7 +192,7 @@ def __init__(self, path, *args, **kwargs): # noqa: E501 """HostPathVolumeSource - a model defined in OpenAPI Args: - path (str): Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + path (str): path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -225,7 +225,7 @@ def __init__(self, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - type (str): Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath. [optional] # noqa: E501 + type (str): type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/http_get_action.py b/sdks/python/client/argo_workflows/model/http_get_action.py index 86a125d711f6..720481ca12c4 100644 --- a/sdks/python/client/argo_workflows/model/http_get_action.py +++ b/sdks/python/client/argo_workflows/model/http_get_action.py @@ -59,10 +59,6 @@ class HTTPGetAction(ModelNormal): """ allowed_values = { - ('scheme',): { - 'HTTP': "HTTP", - 'HTTPS': "HTTPS", - }, } validations = { @@ -158,7 +154,7 @@ def _from_openapi_data(cls, port, *args, **kwargs): # noqa: E501 host (str): Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.. [optional] # noqa: E501 http_headers ([HTTPHeader]): Custom headers to set in the request. HTTP allows repeated headers.. [optional] # noqa: E501 path (str): Path to access on the HTTP server.. [optional] # noqa: E501 - scheme (str): Scheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - `\"HTTP\"` means that the scheme used will be http:// - `\"HTTPS\"` means that the scheme used will be https://. [optional] # noqa: E501 + scheme (str): Scheme to use for connecting to the host. Defaults to HTTP.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -247,7 +243,7 @@ def __init__(self, port, *args, **kwargs): # noqa: E501 host (str): Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.. [optional] # noqa: E501 http_headers ([HTTPHeader]): Custom headers to set in the request. HTTP allows repeated headers.. [optional] # noqa: E501 path (str): Path to access on the HTTP server.. [optional] # noqa: E501 - scheme (str): Scheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - `\"HTTP\"` means that the scheme used will be http:// - `\"HTTPS\"` means that the scheme used will be https://. 
[optional] # noqa: E501 + scheme (str): Scheme to use for connecting to the host. Defaults to HTTP.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/http_header.py b/sdks/python/client/argo_workflows/model/http_header.py index de7ff70e9a83..e1d80342bada 100644 --- a/sdks/python/client/argo_workflows/model/http_header.py +++ b/sdks/python/client/argo_workflows/model/http_header.py @@ -106,7 +106,7 @@ def _from_openapi_data(cls, name, value, *args, **kwargs): # noqa: E501 """HTTPHeader - a model defined in OpenAPI Args: - name (str): The header field name + name (str): The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. value (str): The header field value Keyword Args: @@ -193,7 +193,7 @@ def __init__(self, name, value, *args, **kwargs): # noqa: E501 """HTTPHeader - a model defined in OpenAPI Args: - name (str): The header field name + name (str): The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. 
value (str): The header field value Keyword Args: diff --git a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_container_node.py b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_container_node.py index ddca8f08b4fe..01b3d9c4e949 100644 --- a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_container_node.py +++ b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_container_node.py @@ -31,6 +31,7 @@ def lazy_import(): from argo_workflows.model.container_port import ContainerPort + from argo_workflows.model.container_resize_policy import ContainerResizePolicy from argo_workflows.model.env_from_source import EnvFromSource from argo_workflows.model.env_var import EnvVar from argo_workflows.model.lifecycle import Lifecycle @@ -40,6 +41,7 @@ def lazy_import(): from argo_workflows.model.volume_device import VolumeDevice from argo_workflows.model.volume_mount import VolumeMount globals()['ContainerPort'] = ContainerPort + globals()['ContainerResizePolicy'] = ContainerResizePolicy globals()['EnvFromSource'] = EnvFromSource globals()['EnvVar'] = EnvVar globals()['Lifecycle'] = Lifecycle @@ -115,6 +117,7 @@ def openapi_types(): 'liveness_probe': (Probe,), # noqa: E501 'ports': ([ContainerPort],), # noqa: E501 'readiness_probe': (Probe,), # noqa: E501 + 'resize_policy': ([ContainerResizePolicy],), # noqa: E501 'resources': (ResourceRequirements,), # noqa: E501 'security_context': (SecurityContext,), # noqa: E501 'startup_probe': (Probe,), # noqa: E501 @@ -146,6 +149,7 @@ def discriminator(): 'liveness_probe': 'livenessProbe', # noqa: E501 'ports': 'ports', # noqa: E501 'readiness_probe': 'readinessProbe', # noqa: E501 + 'resize_policy': 'resizePolicy', # noqa: E501 'resources': 'resources', # noqa: E501 'security_context': 'securityContext', # noqa: E501 'startup_probe': 'startupProbe', # noqa: E501 @@ -214,6 +218,7 @@ def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501 
liveness_probe (Probe): [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 @@ -321,6 +326,7 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 liveness_probe (Probe): [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. 
[optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 diff --git a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_script_template.py b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_script_template.py index 705f283bf03d..5a7941dac455 100644 --- a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_script_template.py +++ b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_script_template.py @@ -31,6 +31,7 @@ def lazy_import(): from argo_workflows.model.container_port import ContainerPort + from argo_workflows.model.container_resize_policy import ContainerResizePolicy from argo_workflows.model.env_from_source import EnvFromSource from argo_workflows.model.env_var import EnvVar from argo_workflows.model.lifecycle import Lifecycle @@ -40,6 +41,7 @@ def lazy_import(): from argo_workflows.model.volume_device import VolumeDevice from argo_workflows.model.volume_mount import VolumeMount globals()['ContainerPort'] = ContainerPort + globals()['ContainerResizePolicy'] = ContainerResizePolicy globals()['EnvFromSource'] = EnvFromSource globals()['EnvVar'] = EnvVar globals()['Lifecycle'] = Lifecycle @@ -115,6 +117,7 @@ def openapi_types(): 'name': (str,), # noqa: E501 'ports': ([ContainerPort],), # noqa: E501 'readiness_probe': (Probe,), # noqa: E501 + 'resize_policy': ([ContainerResizePolicy],), # noqa: E501 'resources': (ResourceRequirements,), # noqa: E501 'security_context': (SecurityContext,), # noqa: E501 'startup_probe': (Probe,), # noqa: E501 @@ -146,6 +149,7 @@ def discriminator(): 'name': 'name', # noqa: E501 'ports': 'ports', # noqa: E501 'readiness_probe': 'readinessProbe', # noqa: E501 + 'resize_policy': 'resizePolicy', # noqa: E501 'resources': 'resources', # noqa: E501 'security_context': 'securityContext', # noqa: E501 'startup_probe': 
'startupProbe', # noqa: E501 @@ -214,6 +218,7 @@ def _from_openapi_data(cls, image, source, *args, **kwargs): # noqa: E501 name (str): Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.. [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 @@ -322,6 +327,7 @@ def __init__(self, image, source, *args, **kwargs): # noqa: E501 name (str): Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.. [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. 
[optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 diff --git a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_user_container.py b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_user_container.py index e34e9a108e5b..5c2496677a0f 100644 --- a/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_user_container.py +++ b/sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_user_container.py @@ -31,6 +31,7 @@ def lazy_import(): from argo_workflows.model.container_port import ContainerPort + from argo_workflows.model.container_resize_policy import ContainerResizePolicy from argo_workflows.model.env_from_source import EnvFromSource from argo_workflows.model.env_var import EnvVar from argo_workflows.model.lifecycle import Lifecycle @@ -40,6 +41,7 @@ def lazy_import(): from argo_workflows.model.volume_device import VolumeDevice from argo_workflows.model.volume_mount import VolumeMount globals()['ContainerPort'] = ContainerPort + globals()['ContainerResizePolicy'] = ContainerResizePolicy globals()['EnvFromSource'] = EnvFromSource globals()['EnvVar'] = EnvVar globals()['Lifecycle'] = Lifecycle @@ -115,6 +117,7 @@ def openapi_types(): 'mirror_volume_mounts': (bool,), # noqa: E501 'ports': ([ContainerPort],), # noqa: E501 'readiness_probe': (Probe,), # noqa: E501 + 'resize_policy': ([ContainerResizePolicy],), # noqa: E501 'resources': (ResourceRequirements,), # noqa: E501 'security_context': (SecurityContext,), # noqa: E501 'startup_probe': (Probe,), # noqa: E501 @@ -146,6 +149,7 @@ def discriminator(): 'mirror_volume_mounts': 'mirrorVolumeMounts', # noqa: E501 'ports': 'ports', # noqa: E501 'readiness_probe': 'readinessProbe', # noqa: E501 + 'resize_policy': 'resizePolicy', # noqa: E501 'resources': 'resources', # noqa: E501 'security_context': 'securityContext', 
# noqa: E501 'startup_probe': 'startupProbe', # noqa: E501 @@ -214,6 +218,7 @@ def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501 mirror_volume_mounts (bool): MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding. [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 @@ -321,6 +326,7 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 mirror_volume_mounts (bool): MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding. [optional] # noqa: E501 ports ([ContainerPort]): List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.. [optional] # noqa: E501 readiness_probe (Probe): [optional] # noqa: E501 + resize_policy ([ContainerResizePolicy]): Resources resize policy for the container.. [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 security_context (SecurityContext): [optional] # noqa: E501 startup_probe (Probe): [optional] # noqa: E501 diff --git a/sdks/python/client/argo_workflows/model/io_k8s_api_policy_v1_pod_disruption_budget_spec.py b/sdks/python/client/argo_workflows/model/io_k8s_api_policy_v1_pod_disruption_budget_spec.py index 4f71a528489e..5450eb710a35 100644 --- a/sdks/python/client/argo_workflows/model/io_k8s_api_policy_v1_pod_disruption_budget_spec.py +++ b/sdks/python/client/argo_workflows/model/io_k8s_api_policy_v1_pod_disruption_budget_spec.py @@ -90,6 +90,7 @@ def openapi_types(): 'max_unavailable': (str,), # noqa: E501 'min_available': (str,), # noqa: E501 'selector': (LabelSelector,), # noqa: E501 + 'unhealthy_pod_eviction_policy': (str,), # noqa: E501 } @cached_property @@ -101,6 +102,7 @@ def discriminator(): 'max_unavailable': 'maxUnavailable', # noqa: E501 'min_available': 'minAvailable', # noqa: E501 'selector': 'selector', # noqa: E501 + 'unhealthy_pod_eviction_policy': 'unhealthyPodEvictionPolicy', # noqa: E501 } read_only_vars = { @@ -147,6 +149,7 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 max_unavailable (str): [optional] # noqa: E501 min_available (str): [optional] # noqa: E501 selector (LabelSelector): [optional] # noqa: E501 + unhealthy_pod_eviction_policy (str): UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". 
Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -231,6 +234,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 max_unavailable (str): [optional] # noqa: E501 min_available (str): [optional] # noqa: E501 selector (LabelSelector): [optional] # noqa: E501 + unhealthy_pod_eviction_policy (str): UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. 
IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/iscsi_volume_source.py b/sdks/python/client/argo_workflows/model/iscsi_volume_source.py index 2ae5e355f608..742d40780e2e 100644 --- a/sdks/python/client/argo_workflows/model/iscsi_volume_source.py +++ b/sdks/python/client/argo_workflows/model/iscsi_volume_source.py @@ -130,9 +130,9 @@ def _from_openapi_data(cls, iqn, lun, target_portal, *args, **kwargs): # noqa: """ISCSIVolumeSource - a model defined in OpenAPI Args: - iqn (str): Target iSCSI Qualified Name. - lun (int): iSCSI Target Lun number. - target_portal (str): iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + iqn (str): iqn is the target iSCSI Qualified Name. + lun (int): lun represents iSCSI Target Lun number. + target_portal (str): targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -165,13 +165,13 @@ def _from_openapi_data(cls, iqn, lun, target_portal, *args, **kwargs): # noqa: Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - chap_auth_discovery (bool): whether support iSCSI Discovery CHAP authentication. [optional] # noqa: E501 - chap_auth_session (bool): whether support iSCSI Session CHAP authentication. [optional] # noqa: E501 - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi. [optional] # noqa: E501 - initiator_name (str): Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.. [optional] # noqa: E501 - iscsi_interface (str): iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).. [optional] # noqa: E501 - portals ([str]): iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.. [optional] # noqa: E501 + chap_auth_discovery (bool): chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication. [optional] # noqa: E501 + chap_auth_session (bool): chapAuthSession defines whether support iSCSI Session CHAP authentication. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi. [optional] # noqa: E501 + initiator_name (str): initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.. [optional] # noqa: E501 + iscsi_interface (str): iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).. [optional] # noqa: E501 + portals ([str]): portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).. [optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ @@ -227,9 +227,9 @@ def __init__(self, iqn, lun, target_portal, *args, **kwargs): # noqa: E501 """ISCSIVolumeSource - a model defined in OpenAPI Args: - iqn (str): Target iSCSI Qualified Name. - lun (int): iSCSI Target Lun number. - target_portal (str): iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + iqn (str): iqn is the target iSCSI Qualified Name. + lun (int): lun represents iSCSI Target Lun number. + target_portal (str): targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -262,13 +262,13 @@ def __init__(self, iqn, lun, target_portal, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - chap_auth_discovery (bool): whether support iSCSI Discovery CHAP authentication. [optional] # noqa: E501 - chap_auth_session (bool): whether support iSCSI Session CHAP authentication. [optional] # noqa: E501 - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi. [optional] # noqa: E501 - initiator_name (str): Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.. [optional] # noqa: E501 - iscsi_interface (str): iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).. [optional] # noqa: E501 - portals ([str]): iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.. [optional] # noqa: E501 + chap_auth_discovery (bool): chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication. [optional] # noqa: E501 + chap_auth_session (bool): chapAuthSession defines whether support iSCSI Session CHAP authentication. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi. [optional] # noqa: E501 + initiator_name (str): initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.. [optional] # noqa: E501 + iscsi_interface (str): iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).. [optional] # noqa: E501 + portals ([str]): portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).. [optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/key_to_path.py b/sdks/python/client/argo_workflows/model/key_to_path.py index 73e3a740d4a4..11587c81552e 100644 --- a/sdks/python/client/argo_workflows/model/key_to_path.py +++ b/sdks/python/client/argo_workflows/model/key_to_path.py @@ -108,8 +108,8 @@ def _from_openapi_data(cls, key, path, *args, **kwargs): # noqa: E501 """KeyToPath - a model defined in OpenAPI Args: - key (str): The key to project. - path (str): The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + key (str): key is the key to project. + path (str): path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -142,7 +142,7 @@ def _from_openapi_data(cls, key, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - mode (int): Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + mode (int): mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -196,8 +196,8 @@ def __init__(self, key, path, *args, **kwargs): # noqa: E501 """KeyToPath - a model defined in OpenAPI Args: - key (str): The key to project. - path (str): The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + key (str): key is the key to project. + path (str): path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -230,7 +230,7 @@ def __init__(self, key, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - mode (int): Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + mode (int): mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/list_meta.py b/sdks/python/client/argo_workflows/model/list_meta.py index c2d2f917b35d..e0c7b940021a 100644 --- a/sdks/python/client/argo_workflows/model/list_meta.py +++ b/sdks/python/client/argo_workflows/model/list_meta.py @@ -143,7 +143,7 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 _continue (str): continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. 
Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.. [optional] # noqa: E501 remaining_item_count (int): remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.. [optional] # noqa: E501 resource_version (str): String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. [optional] # noqa: E501 - self_link (str): selfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501 + self_link (str): Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -228,7 +228,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 _continue (str): continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.. [optional] # noqa: E501 remaining_item_count (int): remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.. [optional] # noqa: E501 resource_version (str): String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. 
[optional] # noqa: E501 - self_link (str): selfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501 + self_link (str): Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/nfs_volume_source.py b/sdks/python/client/argo_workflows/model/nfs_volume_source.py index b7a0a13f5ce5..fb426fea3b88 100644 --- a/sdks/python/client/argo_workflows/model/nfs_volume_source.py +++ b/sdks/python/client/argo_workflows/model/nfs_volume_source.py @@ -108,8 +108,8 @@ def _from_openapi_data(cls, path, server, *args, **kwargs): # noqa: E501 """NFSVolumeSource - a model defined in OpenAPI Args: - path (str): Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - server (str): Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + path (str): path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + server (str): server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -142,7 +142,7 @@ def _from_openapi_data(cls, path, server, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs. 
[optional] # noqa: E501 + read_only (bool): readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -196,8 +196,8 @@ def __init__(self, path, server, *args, **kwargs): # noqa: E501 """NFSVolumeSource - a model defined in OpenAPI Args: - path (str): Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - server (str): Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + path (str): path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + server (str): server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -230,7 +230,7 @@ def __init__(self, path, server, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs. [optional] # noqa: E501 + read_only (bool): readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/node_selector_requirement.py b/sdks/python/client/argo_workflows/model/node_selector_requirement.py index a887c2bcf074..71908b03277a 100644 --- a/sdks/python/client/argo_workflows/model/node_selector_requirement.py +++ b/sdks/python/client/argo_workflows/model/node_selector_requirement.py @@ -55,14 +55,6 @@ class NodeSelectorRequirement(ModelNormal): """ allowed_values = { - ('operator',): { - 'DOESNOTEXIST': "DoesNotExist", - 'EXISTS': "Exists", - 'GT': "Gt", - 'IN': "In", - 'LT': "Lt", - 'NOTIN': "NotIn", - }, } validations = { @@ -117,7 +109,7 @@ def _from_openapi_data(cls, key, operator, *args, **kwargs): # noqa: E501 Args: key (str): The label key that the selector applies to. - operator (str): Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - `\"DoesNotExist\"` - `\"Exists\"` - `\"Gt\"` - `\"In\"` - `\"Lt\"` - `\"NotIn\"` + operator (str): Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -205,7 +197,7 @@ def __init__(self, key, operator, *args, **kwargs): # noqa: E501 Args: key (str): The label key that the selector applies to. - operator (str): Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - `\"DoesNotExist\"` - `\"Exists\"` - `\"Gt\"` - `\"In\"` - `\"Lt\"` - `\"NotIn\"` + operator (str): Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types diff --git a/sdks/python/client/argo_workflows/model/object_meta.py b/sdks/python/client/argo_workflows/model/object_meta.py index 1bf09981c09e..c3d81906530b 100644 --- a/sdks/python/client/argo_workflows/model/object_meta.py +++ b/sdks/python/client/argo_workflows/model/object_meta.py @@ -90,7 +90,6 @@ def openapi_types(): lazy_import() return { 'annotations': ({str: (str,)},), # noqa: E501 - 'cluster_name': (str,), # noqa: E501 'creation_timestamp': (datetime,), # noqa: E501 'deletion_grace_period_seconds': (int,), # noqa: E501 'deletion_timestamp': (datetime,), # noqa: E501 @@ -114,7 +113,6 @@ def discriminator(): attribute_map = { 'annotations': 'annotations', # noqa: E501 - 'cluster_name': 'clusterName', # noqa: E501 'creation_timestamp': 'creationTimestamp', # noqa: E501 'deletion_grace_period_seconds': 'deletionGracePeriodSeconds', # noqa: E501 'deletion_timestamp': 'deletionTimestamp', # noqa: E501 @@ -172,22 +170,21 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations. [optional] # noqa: E501 - cluster_name (str): The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.. 
[optional] # noqa: E501 + annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations. [optional] # noqa: E501 creation_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 deletion_grace_period_seconds (int): Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.. [optional] # noqa: E501 deletion_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 finalizers ([str]): Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.. [optional] # noqa: E501 - generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501 + generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501 generation (int): A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.. 
[optional] # noqa: E501 - labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. [optional] # noqa: E501 + labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels. [optional] # noqa: E501 managed_fields ([ManagedFieldsEntry]): ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.. [optional] # noqa: E501 - name (str): Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names. [optional] # noqa: E501 - namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces. [optional] # noqa: E501 + name (str): Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names. [optional] # noqa: E501 + namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces. [optional] # noqa: E501 owner_references ([OwnerReference]): List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.. [optional] # noqa: E501 resource_version (str): An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. [optional] # noqa: E501 - self_link (str): SelfLink is a URL representing this object. Populated by the system. Read-only. 
DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501 - uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids. [optional] # noqa: E501 + self_link (str): Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.. [optional] # noqa: E501 + uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -269,22 +266,21 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations. [optional] # noqa: E501 - cluster_name (str): The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.. 
[optional] # noqa: E501 + annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations. [optional] # noqa: E501 creation_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 deletion_grace_period_seconds (int): Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.. [optional] # noqa: E501 deletion_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 finalizers ([str]): Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.. [optional] # noqa: E501 - generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501 + generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501 generation (int): A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.. 
[optional] # noqa: E501 - labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. [optional] # noqa: E501 + labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels. [optional] # noqa: E501 managed_fields ([ManagedFieldsEntry]): ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.. [optional] # noqa: E501 - name (str): Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names. [optional] # noqa: E501 - namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces. [optional] # noqa: E501 + name (str): Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names. [optional] # noqa: E501 + namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces. [optional] # noqa: E501 owner_references ([OwnerReference]): List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.. [optional] # noqa: E501 resource_version (str): An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. [optional] # noqa: E501 - self_link (str): SelfLink is a URL representing this object. Populated by the system. Read-only. 
DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501 - uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids. [optional] # noqa: E501 + self_link (str): Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.. [optional] # noqa: E501 + uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/owner_reference.py b/sdks/python/client/argo_workflows/model/owner_reference.py index 2d4370a53023..de92bdab3351 100644 --- a/sdks/python/client/argo_workflows/model/owner_reference.py +++ b/sdks/python/client/argo_workflows/model/owner_reference.py @@ -116,8 +116,8 @@ def _from_openapi_data(cls, api_version, kind, name, uid, *args, **kwargs): # n Args: api_version (str): API version of the referent. kind (str): Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - name (str): Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names - uid (str): UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + uid (str): UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -150,7 +150,7 @@ def _from_openapi_data(cls, api_version, kind, name, uid, *args, **kwargs): # n Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - block_owner_deletion (bool): If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.. [optional] # noqa: E501 + block_owner_deletion (bool): If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.. [optional] # noqa: E501 controller (bool): If true, this reference points to the managing controller.. [optional] # noqa: E501 """ @@ -209,8 +209,8 @@ def __init__(self, api_version, kind, name, uid, *args, **kwargs): # noqa: E501 Args: api_version (str): API version of the referent. kind (str): Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - name (str): Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names - uid (str): UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids + name (str): Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names + uid (str): UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -243,7 +243,7 @@ def __init__(self, api_version, kind, name, uid, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - block_owner_deletion (bool): If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.. [optional] # noqa: E501 + block_owner_deletion (bool): If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.. [optional] # noqa: E501 controller (bool): If true, this reference points to the managing controller.. 
[optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/persistent_volume_claim_condition.py b/sdks/python/client/argo_workflows/model/persistent_volume_claim_condition.py index 10c3ee2b90d0..6ff39a6e36b5 100644 --- a/sdks/python/client/argo_workflows/model/persistent_volume_claim_condition.py +++ b/sdks/python/client/argo_workflows/model/persistent_volume_claim_condition.py @@ -55,10 +55,6 @@ class PersistentVolumeClaimCondition(ModelNormal): """ allowed_values = { - ('type',): { - 'FILESYSTEMRESIZEPENDING': "FileSystemResizePending", - 'RESIZING': "Resizing", - }, } validations = { @@ -119,7 +115,7 @@ def _from_openapi_data(cls, status, type, *args, **kwargs): # noqa: E501 Args: status (str): - type (str): Possible enum values: - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node - `\"Resizing\"` - a user trigger resize of pvc has been started + type (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -154,8 +150,8 @@ def _from_openapi_data(cls, status, type, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) last_probe_time (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 last_transition_time (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 - message (str): Human-readable message indicating details about last transition.. [optional] # noqa: E501 - reason (str): Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.. 
[optional] # noqa: E501 + message (str): message is the human-readable message indicating details about last transition.. [optional] # noqa: E501 + reason (str): reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -210,7 +206,7 @@ def __init__(self, status, type, *args, **kwargs): # noqa: E501 Args: status (str): - type (str): Possible enum values: - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node - `\"Resizing\"` - a user trigger resize of pvc has been started + type (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -245,8 +241,8 @@ def __init__(self, status, type, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) last_probe_time (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 last_transition_time (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501 - message (str): Human-readable message indicating details about last transition.. [optional] # noqa: E501 - reason (str): Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.. [optional] # noqa: E501 + message (str): message is the human-readable message indicating details about last transition.. 
[optional] # noqa: E501 + reason (str): reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/persistent_volume_claim_spec.py b/sdks/python/client/argo_workflows/model/persistent_volume_claim_spec.py index d610e2305ec4..c992e2c93e0c 100644 --- a/sdks/python/client/argo_workflows/model/persistent_volume_claim_spec.py +++ b/sdks/python/client/argo_workflows/model/persistent_volume_claim_spec.py @@ -33,9 +33,11 @@ def lazy_import(): from argo_workflows.model.label_selector import LabelSelector from argo_workflows.model.resource_requirements import ResourceRequirements from argo_workflows.model.typed_local_object_reference import TypedLocalObjectReference + from argo_workflows.model.typed_object_reference import TypedObjectReference globals()['LabelSelector'] = LabelSelector globals()['ResourceRequirements'] = ResourceRequirements globals()['TypedLocalObjectReference'] = TypedLocalObjectReference + globals()['TypedObjectReference'] = TypedObjectReference class PersistentVolumeClaimSpec(ModelNormal): @@ -93,7 +95,7 @@ def openapi_types(): return { 'access_modes': ([str],), # noqa: E501 'data_source': (TypedLocalObjectReference,), # noqa: E501 - 'data_source_ref': (TypedLocalObjectReference,), # noqa: E501 + 'data_source_ref': (TypedObjectReference,), # noqa: E501 'resources': (ResourceRequirements,), # noqa: E501 'selector': (LabelSelector,), # noqa: E501 'storage_class_name': (str,), # noqa: E501 @@ -158,14 +160,14 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - access_modes ([str]): AccessModes contains the desired access modes the 
volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 + access_modes ([str]): accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 data_source (TypedLocalObjectReference): [optional] # noqa: E501 - data_source_ref (TypedLocalObjectReference): [optional] # noqa: E501 + data_source_ref (TypedObjectReference): [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 selector (LabelSelector): [optional] # noqa: E501 - storage_class_name (str): Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. [optional] # noqa: E501 + storage_class_name (str): storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. [optional] # noqa: E501 volume_mode (str): volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.. [optional] # noqa: E501 - volume_name (str): VolumeName is the binding reference to the PersistentVolume backing this claim.. [optional] # noqa: E501 + volume_name (str): volumeName is the binding reference to the PersistentVolume backing this claim.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -247,14 +249,14 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - access_modes ([str]): AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. 
[optional] # noqa: E501 + access_modes ([str]): accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 data_source (TypedLocalObjectReference): [optional] # noqa: E501 - data_source_ref (TypedLocalObjectReference): [optional] # noqa: E501 + data_source_ref (TypedObjectReference): [optional] # noqa: E501 resources (ResourceRequirements): [optional] # noqa: E501 selector (LabelSelector): [optional] # noqa: E501 - storage_class_name (str): Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. [optional] # noqa: E501 + storage_class_name (str): storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1. [optional] # noqa: E501 volume_mode (str): volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.. [optional] # noqa: E501 - volume_name (str): VolumeName is the binding reference to the PersistentVolume backing this claim.. [optional] # noqa: E501 + volume_name (str): volumeName is the binding reference to the PersistentVolume backing this claim.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/persistent_volume_claim_status.py b/sdks/python/client/argo_workflows/model/persistent_volume_claim_status.py index e314a90e0ff4..e1617fe86026 100644 --- a/sdks/python/client/argo_workflows/model/persistent_volume_claim_status.py +++ b/sdks/python/client/argo_workflows/model/persistent_volume_claim_status.py @@ -59,11 +59,6 @@ class PersistentVolumeClaimStatus(ModelNormal): """ allowed_values = { - ('phase',): { - 'BOUND': "Bound", - 'LOST': "Lost", - 'PENDING': "Pending", - }, } validations = { @@ -155,12 +150,12 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - access_modes ([str]): AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 - allocated_resources ({str: (str,)}): The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 - capacity ({str: (str,)}): Represents the actual resources of the underlying volume.. [optional] # noqa: E501 - conditions ([PersistentVolumeClaimCondition]): Current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.. [optional] # noqa: E501 - phase (str): Phase represents the current phase of PersistentVolumeClaim. Possible enum values: - `\"Bound\"` used for PersistentVolumeClaims that are bound - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound. [optional] # noqa: E501 - resize_status (str): ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 + access_modes ([str]): accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 + allocated_resources ({str: (str,)}): allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 + capacity ({str: (str,)}): capacity represents the actual resources of the underlying volume.. 
[optional] # noqa: E501 + conditions ([PersistentVolumeClaimCondition]): conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.. [optional] # noqa: E501 + phase (str): phase represents the current phase of PersistentVolumeClaim.. [optional] # noqa: E501 + resize_status (str): resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -242,12 +237,12 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - access_modes ([str]): AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 - allocated_resources ({str: (str,)}): The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 - capacity ({str: (str,)}): Represents the actual resources of the underlying volume.. 
[optional] # noqa: E501 - conditions ([PersistentVolumeClaimCondition]): Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.. [optional] # noqa: E501 - phase (str): Phase represents the current phase of PersistentVolumeClaim. Possible enum values: - `\"Bound\"` used for PersistentVolumeClaims that are bound - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound. [optional] # noqa: E501 - resize_status (str): ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 + access_modes ([str]): accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1. [optional] # noqa: E501 + allocated_resources ({str: (str,)}): allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. 
[optional] # noqa: E501 + capacity ({str: (str,)}): capacity represents the actual resources of the underlying volume.. [optional] # noqa: E501 + conditions ([PersistentVolumeClaimCondition]): conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.. [optional] # noqa: E501 + phase (str): phase represents the current phase of PersistentVolumeClaim.. [optional] # noqa: E501 + resize_status (str): resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/persistent_volume_claim_volume_source.py b/sdks/python/client/argo_workflows/model/persistent_volume_claim_volume_source.py index 062d79312018..6900c41d038a 100644 --- a/sdks/python/client/argo_workflows/model/persistent_volume_claim_volume_source.py +++ b/sdks/python/client/argo_workflows/model/persistent_volume_claim_volume_source.py @@ -106,7 +106,7 @@ def _from_openapi_data(cls, claim_name, *args, **kwargs): # noqa: E501 """PersistentVolumeClaimVolumeSource - a model defined in OpenAPI Args: - claim_name (str): ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + claim_name (str): claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -139,7 +139,7 @@ def _from_openapi_data(cls, claim_name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): Will force the ReadOnly setting in VolumeMounts. Default false.. [optional] # noqa: E501 + read_only (bool): readOnly Will force the ReadOnly setting in VolumeMounts. Default false.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -192,7 +192,7 @@ def __init__(self, claim_name, *args, **kwargs): # noqa: E501 """PersistentVolumeClaimVolumeSource - a model defined in OpenAPI Args: - claim_name (str): ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + claim_name (str): claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -225,7 +225,7 @@ def __init__(self, claim_name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - read_only (bool): Will force the ReadOnly setting in VolumeMounts. Default false.. [optional] # noqa: E501 + read_only (bool): readOnly Will force the ReadOnly setting in VolumeMounts. Default false.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/photon_persistent_disk_volume_source.py b/sdks/python/client/argo_workflows/model/photon_persistent_disk_volume_source.py index 44081c16b657..b65d05f25506 100644 --- a/sdks/python/client/argo_workflows/model/photon_persistent_disk_volume_source.py +++ b/sdks/python/client/argo_workflows/model/photon_persistent_disk_volume_source.py @@ -106,7 +106,7 @@ def _from_openapi_data(cls, pd_id, *args, **kwargs): # noqa: E501 """PhotonPersistentDiskVolumeSource - a model defined in OpenAPI Args: - pd_id (str): ID that identifies Photon Controller persistent disk + pd_id (str): pdID is the ID that identifies Photon Controller persistent disk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -139,7 +139,7 @@ def _from_openapi_data(cls, pd_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -192,7 +192,7 @@ def __init__(self, pd_id, *args, **kwargs): # noqa: E501 """PhotonPersistentDiskVolumeSource - a model defined in OpenAPI Args: - pd_id (str): ID that identifies Photon Controller persistent disk + pd_id (str): pdID is the ID that identifies Photon Controller persistent disk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -225,7 +225,7 @@ def __init__(self, pd_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/pod_affinity_term.py b/sdks/python/client/argo_workflows/model/pod_affinity_term.py index 50f340e70553..450796620b71 100644 --- a/sdks/python/client/argo_workflows/model/pod_affinity_term.py +++ b/sdks/python/client/argo_workflows/model/pod_affinity_term.py @@ -151,7 +151,7 @@ def _from_openapi_data(cls, topology_key, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) label_selector (LabelSelector): [optional] # noqa: E501 namespace_selector (LabelSelector): [optional] # noqa: E501 - namespaces ([str]): namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". [optional] # noqa: E501 + namespaces ([str]): namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -239,7 +239,7 @@ def __init__(self, topology_key, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) label_selector (LabelSelector): [optional] # noqa: E501 namespace_selector (LabelSelector): [optional] # noqa: E501 - namespaces ([str]): namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". [optional] # noqa: E501 + namespaces ([str]): namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/pod_security_context.py b/sdks/python/client/argo_workflows/model/pod_security_context.py index 0ed02f420fcd..1a9dd4c8dedc 100644 --- a/sdks/python/client/argo_workflows/model/pod_security_context.py +++ b/sdks/python/client/argo_workflows/model/pod_security_context.py @@ -171,7 +171,7 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 run_as_user (int): The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. 
May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 se_linux_options (SELinuxOptions): [optional] # noqa: E501 seccomp_profile (SeccompProfile): [optional] # noqa: E501 - supplemental_groups ([int]): A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 + supplemental_groups ([int]): A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 sysctls ([Sysctl]): Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 windows_options (WindowsSecurityContextOptions): [optional] # noqa: E501 """ @@ -262,7 +262,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 run_as_user (int): The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 se_linux_options (SELinuxOptions): [optional] # noqa: E501 seccomp_profile (SeccompProfile): [optional] # noqa: E501 - supplemental_groups ([int]): A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 + supplemental_groups ([int]): A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.. [optional] # noqa: E501 sysctls ([Sysctl]): Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.. 
[optional] # noqa: E501 windows_options (WindowsSecurityContextOptions): [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/portworx_volume_source.py b/sdks/python/client/argo_workflows/model/portworx_volume_source.py index 01c61e0d232f..09d7b387097b 100644 --- a/sdks/python/client/argo_workflows/model/portworx_volume_source.py +++ b/sdks/python/client/argo_workflows/model/portworx_volume_source.py @@ -108,7 +108,7 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 """PortworxVolumeSource - a model defined in OpenAPI Args: - volume_id (str): VolumeID uniquely identifies a Portworx volume + volume_id (str): volumeID uniquely identifies a Portworx volume Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -141,8 +141,8 @@ def _from_openapi_data(cls, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -195,7 +195,7 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 """PortworxVolumeSource - a model defined in OpenAPI Args: - volume_id (str): VolumeID uniquely identifies a Portworx volume + volume_id (str): volumeID uniquely identifies a Portworx volume Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -228,8 +228,8 @@ def __init__(self, volume_id, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/projected_volume_source.py b/sdks/python/client/argo_workflows/model/projected_volume_source.py index d0cdb60f6254..3a7b186faae1 100644 --- a/sdks/python/client/argo_workflows/model/projected_volume_source.py +++ b/sdks/python/client/argo_workflows/model/projected_volume_source.py @@ -142,8 +142,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 - sources ([VolumeProjection]): list of volume projections. [optional] # noqa: E501 + default_mode (int): defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + sources ([VolumeProjection]): sources is the list of volume projections. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -225,8 +225,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 - sources ([VolumeProjection]): list of volume projections. [optional] # noqa: E501 + default_mode (int): defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + sources ([VolumeProjection]): sources is the list of volume projections. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/quobyte_volume_source.py b/sdks/python/client/argo_workflows/model/quobyte_volume_source.py index 644e470873ef..8a9385dbfbc5 100644 --- a/sdks/python/client/argo_workflows/model/quobyte_volume_source.py +++ b/sdks/python/client/argo_workflows/model/quobyte_volume_source.py @@ -114,8 +114,8 @@ def _from_openapi_data(cls, registry, volume, *args, **kwargs): # noqa: E501 """QuobyteVolumeSource - a model defined in OpenAPI Args: - registry (str): Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes - volume (str): Volume is a string that references an already created Quobyte volume by name. + registry (str): registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + volume (str): volume is a string that references an already created Quobyte volume by name. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -148,10 +148,10 @@ def _from_openapi_data(cls, registry, volume, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - group (str): Group to map volume access to Default is no group. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.. [optional] # noqa: E501 - tenant (str): Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin. [optional] # noqa: E501 - user (str): User to map volume access to Defaults to serivceaccount user. 
[optional] # noqa: E501 + group (str): group to map volume access to Default is no group. [optional] # noqa: E501 + read_only (bool): readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.. [optional] # noqa: E501 + tenant (str): tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin. [optional] # noqa: E501 + user (str): user to map volume access to Defaults to serivceaccount user. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -205,8 +205,8 @@ def __init__(self, registry, volume, *args, **kwargs): # noqa: E501 """QuobyteVolumeSource - a model defined in OpenAPI Args: - registry (str): Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes - volume (str): Volume is a string that references an already created Quobyte volume by name. + registry (str): registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + volume (str): volume is a string that references an already created Quobyte volume by name. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -239,10 +239,10 @@ def __init__(self, registry, volume, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - group (str): Group to map volume access to Default is no group. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.. 
[optional] # noqa: E501 - tenant (str): Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin. [optional] # noqa: E501 - user (str): User to map volume access to Defaults to serivceaccount user. [optional] # noqa: E501 + group (str): group to map volume access to Default is no group. [optional] # noqa: E501 + read_only (bool): readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.. [optional] # noqa: E501 + tenant (str): tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin. [optional] # noqa: E501 + user (str): user to map volume access to Defaults to serivceaccount user. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/rbd_volume_source.py b/sdks/python/client/argo_workflows/model/rbd_volume_source.py index 16633ebba3e0..bc6c5a122406 100644 --- a/sdks/python/client/argo_workflows/model/rbd_volume_source.py +++ b/sdks/python/client/argo_workflows/model/rbd_volume_source.py @@ -124,8 +124,8 @@ def _from_openapi_data(cls, image, monitors, *args, **kwargs): # noqa: E501 """RBDVolumeSource - a model defined in OpenAPI Args: - image (str): The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - monitors ([str]): A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + image (str): image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + monitors ([str]): monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -158,12 +158,12 @@ def _from_openapi_data(cls, image, monitors, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd. [optional] # noqa: E501 - keyring (str): Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 - pool (str): The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd. [optional] # noqa: E501 + keyring (str): keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + pool (str): pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. 
[optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - user (str): The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + user (str): user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -217,8 +217,8 @@ def __init__(self, image, monitors, *args, **kwargs): # noqa: E501 """RBDVolumeSource - a model defined in OpenAPI Args: - image (str): The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it - monitors ([str]): A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + image (str): image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + monitors ([str]): monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -251,12 +251,12 @@ def __init__(self, image, monitors, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd. [optional] # noqa: E501 - keyring (str): Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 - pool (str): The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 - read_only (bool): ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd. [optional] # noqa: E501 + keyring (str): keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + pool (str): pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + read_only (bool): readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - user (str): The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. [optional] # noqa: E501 + user (str): user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/resource_claim.py b/sdks/python/client/argo_workflows/model/resource_claim.py new file mode 100644 index 000000000000..a5dadcfc748a --- /dev/null +++ b/sdks/python/client/argo_workflows/model/resource_claim.py @@ -0,0 +1,261 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + + +class ResourceClaim(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). 
The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'name': (str,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'name': 'name', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501 + """ResourceClaim - a model defined in OpenAPI + + Args: + name (str): Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. 
+ False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.name = name + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, name, *args, **kwargs): # noqa: E501 + """ResourceClaim - a model defined in OpenAPI + + Args: + name (str): Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. 
+ _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.name = name + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/model/resource_field_selector.py b/sdks/python/client/argo_workflows/model/resource_field_selector.py index b00539c01ed6..19a8654b6c6b 100644 --- a/sdks/python/client/argo_workflows/model/resource_field_selector.py +++ b/sdks/python/client/argo_workflows/model/resource_field_selector.py @@ -142,7 +142,7 @@ def _from_openapi_data(cls, resource, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) container_name (str): Container name: required for volumes, optional for env vars. [optional] # noqa: E501 - divisor (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. 
No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 + divisor (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -229,7 +229,7 @@ def __init__(self, resource, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) container_name (str): Container name: required for volumes, optional for env vars. [optional] # noqa: E501 - divisor (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) ::= \"e\" | \"E\" No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501 + divisor (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` ::= (Note that may be empty, from the \"\" case in .) ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) 
::= \"e\" | \"E\" ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/resource_requirements.py b/sdks/python/client/argo_workflows/model/resource_requirements.py index 9ba0fa404963..ffca50ec8c57 100644 --- a/sdks/python/client/argo_workflows/model/resource_requirements.py +++ b/sdks/python/client/argo_workflows/model/resource_requirements.py @@ -29,6 +29,10 @@ from argo_workflows.exceptions import ApiAttributeError +def lazy_import(): + from argo_workflows.model.resource_claim import ResourceClaim + globals()['ResourceClaim'] = ResourceClaim + class ResourceRequirements(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. @@ -66,6 +70,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @@ -80,7 +85,9 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ + lazy_import() return { + 'claims': ([ResourceClaim],), # noqa: E501 'limits': ({str: (str,)},), # noqa: E501 'requests': ({str: (str,)},), # noqa: E501 } @@ -91,6 +98,7 @@ def discriminator(): attribute_map = { + 'claims': 'claims', # noqa: E501 'limits': 'limits', # noqa: E501 'requests': 'requests', # noqa: E501 } @@ -136,8 +144,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + claims ([ResourceClaim]): Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers.. 
[optional] # noqa: E501 limits ({str: (str,)}): Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 - requests ({str: (str,)}): Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 + requests ({str: (str,)}): Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -219,8 +228,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + claims ([ResourceClaim]): Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers.. [optional] # noqa: E501 limits ({str: (str,)}): Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 - requests ({str: (str,)}): Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 + requests ({str: (str,)}): Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/scale_io_volume_source.py b/sdks/python/client/argo_workflows/model/scale_io_volume_source.py index 7dd161b71e6e..a743cdf8be4c 100644 --- a/sdks/python/client/argo_workflows/model/scale_io_volume_source.py +++ b/sdks/python/client/argo_workflows/model/scale_io_volume_source.py @@ -128,9 +128,9 @@ def _from_openapi_data(cls, gateway, secret_ref, system, *args, **kwargs): # no """ScaleIOVolumeSource - a model defined in OpenAPI Args: - gateway (str): The host address of the ScaleIO API Gateway. + gateway (str): gateway is the host address of the ScaleIO API Gateway. secret_ref (LocalObjectReference): - system (str): The name of the storage system as configured in ScaleIO. + system (str): system is the name of the storage system as configured in ScaleIO. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -163,13 +163,13 @@ def _from_openapi_data(cls, gateway, secret_ref, system, *args, **kwargs): # no Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".. [optional] # noqa: E501 - protection_domain (str): The name of the ScaleIO Protection Domain for the configured storage.. 
[optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 - ssl_enabled (bool): Flag to enable/disable SSL communication with Gateway, default false. [optional] # noqa: E501 - storage_mode (str): Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.. [optional] # noqa: E501 - storage_pool (str): The ScaleIO Storage Pool associated with the protection domain.. [optional] # noqa: E501 - volume_name (str): The name of a volume already created in the ScaleIO system that is associated with this volume source.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".. [optional] # noqa: E501 + protection_domain (str): protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.. [optional] # noqa: E501 + read_only (bool): readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + ssl_enabled (bool): sslEnabled Flag enable/disable SSL communication with Gateway, default false. [optional] # noqa: E501 + storage_mode (str): storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.. [optional] # noqa: E501 + storage_pool (str): storagePool is the ScaleIO Storage Pool associated with the protection domain.. [optional] # noqa: E501 + volume_name (str): volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -224,9 +224,9 @@ def __init__(self, gateway, secret_ref, system, *args, **kwargs): # noqa: E501 """ScaleIOVolumeSource - a model defined in OpenAPI Args: - gateway (str): The host address of the ScaleIO API Gateway. + gateway (str): gateway is the host address of the ScaleIO API Gateway. secret_ref (LocalObjectReference): - system (str): The name of the storage system as configured in ScaleIO. + system (str): system is the name of the storage system as configured in ScaleIO. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -259,13 +259,13 @@ def __init__(self, gateway, secret_ref, system, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".. [optional] # noqa: E501 - protection_domain (str): The name of the ScaleIO Protection Domain for the configured storage.. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 - ssl_enabled (bool): Flag to enable/disable SSL communication with Gateway, default false. [optional] # noqa: E501 - storage_mode (str): Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.. [optional] # noqa: E501 - storage_pool (str): The ScaleIO Storage Pool associated with the protection domain.. [optional] # noqa: E501 - volume_name (str): The name of a volume already created in the ScaleIO system that is associated with this volume source.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".. [optional] # noqa: E501 + protection_domain (str): protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.. [optional] # noqa: E501 + read_only (bool): readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + ssl_enabled (bool): sslEnabled Flag enable/disable SSL communication with Gateway, default false. [optional] # noqa: E501 + storage_mode (str): storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.. [optional] # noqa: E501 + storage_pool (str): storagePool is the ScaleIO Storage Pool associated with the protection domain.. [optional] # noqa: E501 + volume_name (str): volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/seccomp_profile.py b/sdks/python/client/argo_workflows/model/seccomp_profile.py index fb07ca0bd919..183653f33701 100644 --- a/sdks/python/client/argo_workflows/model/seccomp_profile.py +++ b/sdks/python/client/argo_workflows/model/seccomp_profile.py @@ -55,11 +55,6 @@ class SeccompProfile(ModelNormal): """ allowed_values = { - ('type',): { - 'LOCALHOST': "Localhost", - 'RUNTIMEDEFAULT': "RuntimeDefault", - 'UNCONFINED': "Unconfined", - }, } validations = { @@ -111,7 +106,7 @@ def _from_openapi_data(cls, type, *args, **kwargs): # noqa: E501 """SeccompProfile - a model defined in OpenAPI Args: - type (str): type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. 
Possible enum values: - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to /seccomp. - `\"RuntimeDefault\"` represents the default container runtime seccomp profile. - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. unconfined). + type (str): type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -197,7 +192,7 @@ def __init__(self, type, *args, **kwargs): # noqa: E501 """SeccompProfile - a model defined in OpenAPI Args: - type (str): type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Possible enum values: - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to /seccomp. - `\"RuntimeDefault\"` represents the default container runtime seccomp profile. - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. unconfined). + type (str): type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types diff --git a/sdks/python/client/argo_workflows/model/secret_projection.py b/sdks/python/client/argo_workflows/model/secret_projection.py index 823c7c84fa43..8a9a0516cd3e 100644 --- a/sdks/python/client/argo_workflows/model/secret_projection.py +++ b/sdks/python/client/argo_workflows/model/secret_projection.py @@ -144,9 +144,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the Secret or its key must be defined. [optional] # noqa: E501 + optional (bool): optional field specify whether the Secret or its key must be defined. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -228,9 +228,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + items ([KeyToPath]): items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 name (str): Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names. [optional] # noqa: E501 - optional (bool): Specify whether the Secret or its key must be defined. [optional] # noqa: E501 + optional (bool): optional field specify whether the Secret or its key must be defined. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/secret_volume_source.py b/sdks/python/client/argo_workflows/model/secret_volume_source.py index 54e0667cfd0c..a6b794bcba27 100644 --- a/sdks/python/client/argo_workflows/model/secret_volume_source.py +++ b/sdks/python/client/argo_workflows/model/secret_volume_source.py @@ -146,10 +146,10 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 - optional (bool): Specify whether the Secret or its keys must be defined. [optional] # noqa: E501 - secret_name (str): Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret. 
[optional] # noqa: E501 + default_mode (int): defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + items ([KeyToPath]): items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 + optional (bool): optional field specify whether the Secret or its keys must be defined. [optional] # noqa: E501 + secret_name (str): secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -231,10 +231,10 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - default_mode (int): Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 - items ([KeyToPath]): If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. [optional] # noqa: E501 - optional (bool): Specify whether the Secret or its keys must be defined. [optional] # noqa: E501 - secret_name (str): Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret. [optional] # noqa: E501 + default_mode (int): defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501 + items ([KeyToPath]): items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.. 
[optional] # noqa: E501 + optional (bool): optional field specify whether the Secret or its keys must be defined. [optional] # noqa: E501 + secret_name (str): secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/service_account_token_projection.py b/sdks/python/client/argo_workflows/model/service_account_token_projection.py index 2943aa80d2e5..8366955dbf46 100644 --- a/sdks/python/client/argo_workflows/model/service_account_token_projection.py +++ b/sdks/python/client/argo_workflows/model/service_account_token_projection.py @@ -108,7 +108,7 @@ def _from_openapi_data(cls, path, *args, **kwargs): # noqa: E501 """ServiceAccountTokenProjection - a model defined in OpenAPI Args: - path (str): Path is the path relative to the mount point of the file to project the token into. + path (str): path is the path relative to the mount point of the file to project the token into. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -141,8 +141,8 @@ def _from_openapi_data(cls, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - audience (str): Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.. [optional] # noqa: E501 - expiration_seconds (int): ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. 
The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.. [optional] # noqa: E501 + audience (str): audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.. [optional] # noqa: E501 + expiration_seconds (int): expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -195,7 +195,7 @@ def __init__(self, path, *args, **kwargs): # noqa: E501 """ServiceAccountTokenProjection - a model defined in OpenAPI Args: - path (str): Path is the path relative to the mount point of the file to project the token into. + path (str): path is the path relative to the mount point of the file to project the token into. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -228,8 +228,8 @@ def __init__(self, path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - audience (str): Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.. 
[optional] # noqa: E501 - expiration_seconds (int): ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.. [optional] # noqa: E501 + audience (str): audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.. [optional] # noqa: E501 + expiration_seconds (int): expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/service_port.py b/sdks/python/client/argo_workflows/model/service_port.py index dd8493e10de9..267284046238 100644 --- a/sdks/python/client/argo_workflows/model/service_port.py +++ b/sdks/python/client/argo_workflows/model/service_port.py @@ -55,11 +55,6 @@ class ServicePort(ModelNormal): """ allowed_values = { - ('protocol',): { - 'SCTP': "SCTP", - 'TCP': "TCP", - 'UDP': "UDP", - }, } validations = { @@ -152,10 +147,10 @@ def _from_openapi_data(cls, port, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - app_protocol (str): The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.. [optional] # noqa: E501 + app_protocol (str): The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.. [optional] # noqa: E501 name (str): The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.. [optional] # noqa: E501 node_port (int): The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. 
If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. [optional] # noqa: E501 - protocol (str): The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol.. [optional] # noqa: E501 + protocol (str): The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.. [optional] # noqa: E501 target_port (str): [optional] # noqa: E501 """ @@ -242,10 +237,10 @@ def __init__(self, port, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - app_protocol (str): The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.. [optional] # noqa: E501 + app_protocol (str): The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.. [optional] # noqa: E501 name (str): The name of this port within the service. This must be a DNS_LABEL. 
All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.. [optional] # noqa: E501 node_port (int): The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. [optional] # noqa: E501 - protocol (str): The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol.. [optional] # noqa: E501 + protocol (str): The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.. [optional] # noqa: E501 target_port (str): [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/storage_os_volume_source.py b/sdks/python/client/argo_workflows/model/storage_os_volume_source.py index febb659bc55a..58c084c2b216 100644 --- a/sdks/python/client/argo_workflows/model/storage_os_volume_source.py +++ b/sdks/python/client/argo_workflows/model/storage_os_volume_source.py @@ -148,11 +148,11 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - volume_name (str): VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.. [optional] # noqa: E501 - volume_namespace (str): VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.. [optional] # noqa: E501 + volume_name (str): volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.. [optional] # noqa: E501 + volume_namespace (str): volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -234,11 +234,11 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 + fs_type (str): fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + read_only (bool): readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 secret_ref (LocalObjectReference): [optional] # noqa: E501 - volume_name (str): VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.. [optional] # noqa: E501 - volume_namespace (str): VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.. [optional] # noqa: E501 + volume_name (str): volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.. [optional] # noqa: E501 + volume_namespace (str): volumeNamespace specifies the scope of the volume within StorageOS. 
If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/model/toleration.py b/sdks/python/client/argo_workflows/model/toleration.py index 6fc5258bed13..0bb0889b54eb 100644 --- a/sdks/python/client/argo_workflows/model/toleration.py +++ b/sdks/python/client/argo_workflows/model/toleration.py @@ -55,15 +55,6 @@ class Toleration(ModelNormal): """ allowed_values = { - ('effect',): { - 'NOEXECUTE': "NoExecute", - 'NOSCHEDULE': "NoSchedule", - 'PREFERNOSCHEDULE': "PreferNoSchedule", - }, - ('operator',): { - 'EQUAL': "Equal", - 'EXISTS': "Exists", - }, } validations = { @@ -151,9 +142,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - effect (str): Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. Possible enum values: - `\"NoExecute\"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. - `\"NoSchedule\"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - `\"PreferNoSchedule\"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. 
Enforced by the scheduler.. [optional] # noqa: E501 + effect (str): Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.. [optional] # noqa: E501 key (str): Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.. [optional] # noqa: E501 - operator (str): Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Possible enum values: - `\"Equal\"` - `\"Exists\"`. [optional] # noqa: E501 + operator (str): Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.. [optional] # noqa: E501 toleration_seconds (int): TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.. [optional] # noqa: E501 value (str): Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.. [optional] # noqa: E501 """ @@ -237,9 +228,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - effect (str): Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
Possible enum values: - `\"NoExecute\"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. - `\"NoSchedule\"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - `\"PreferNoSchedule\"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler.. [optional] # noqa: E501 + effect (str): Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.. [optional] # noqa: E501 key (str): Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.. [optional] # noqa: E501 - operator (str): Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Possible enum values: - `\"Equal\"` - `\"Exists\"`. [optional] # noqa: E501 + operator (str): Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.. [optional] # noqa: E501 toleration_seconds (int): TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.. 
[optional] # noqa: E501 value (str): Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.. [optional] # noqa: E501 """ diff --git a/sdks/python/client/argo_workflows/model/typed_object_reference.py b/sdks/python/client/argo_workflows/model/typed_object_reference.py new file mode 100644 index 000000000000..99b0ac77673b --- /dev/null +++ b/sdks/python/client/argo_workflows/model/typed_object_reference.py @@ -0,0 +1,275 @@ +""" + Argo Workflows API + + Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argo-workflows.readthedocs.io/en/latest/ # noqa: E501 + + The version of the OpenAPI document: VERSION + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from argo_workflows.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from argo_workflows.exceptions import ApiAttributeError + + + +class TypedObjectReference(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. 
+ validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'kind': (str,), # noqa: E501 + 'name': (str,), # noqa: E501 + 'api_group': (str,), # noqa: E501 + 'namespace': (str,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'kind': 'kind', # noqa: E501 + 'name': 'name', # noqa: E501 + 'api_group': 'apiGroup', # noqa: E501 + 'namespace': 'namespace', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, kind, name, *args, **kwargs): # noqa: E501 + """TypedObjectReference - a model defined in OpenAPI + + Args: + kind (str): Kind is the type of resource being referenced + name (str): Name is the name of resource being referenced + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. 
+ Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + api_group (str): APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.. [optional] # noqa: E501 + namespace (str): Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.. 
[optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.kind = kind + self.name = name + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, kind, name, *args, **kwargs): # noqa: E501 + """TypedObjectReference - a model defined in OpenAPI + + Args: + kind (str): Kind is the type of resource being referenced + name (str): Name is the name of resource being referenced + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. 
+ Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + api_group (str): APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.. [optional] # noqa: E501 + namespace (str): Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.. 
[optional] # noqa: E501 + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.kind = kind + self.name = name + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/sdks/python/client/argo_workflows/model/volume.py b/sdks/python/client/argo_workflows/model/volume.py index ad0df855203c..7f5f0c1a54e8 100644 --- a/sdks/python/client/argo_workflows/model/volume.py +++ b/sdks/python/client/argo_workflows/model/volume.py @@ -224,7 +224,7 @@ def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501 """Volume - a model defined in OpenAPI Args: - name (str): Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + name (str): name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -338,7 +338,7 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 """Volume - a model defined in OpenAPI Args: - name (str): Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + name (str): name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Keyword Args: _check_type (bool): if True, values for parameters in openapi_types diff --git a/sdks/python/client/argo_workflows/model/vsphere_virtual_disk_volume_source.py b/sdks/python/client/argo_workflows/model/vsphere_virtual_disk_volume_source.py index be95c147cfa3..ee61684c3e60 100644 --- a/sdks/python/client/argo_workflows/model/vsphere_virtual_disk_volume_source.py +++ b/sdks/python/client/argo_workflows/model/vsphere_virtual_disk_volume_source.py @@ -110,7 +110,7 @@ def _from_openapi_data(cls, volume_path, *args, **kwargs): # noqa: E501 """VsphereVirtualDiskVolumeSource - a model defined in OpenAPI Args: - volume_path (str): Path that identifies vSphere volume vmdk + volume_path (str): volumePath is the path that identifies vSphere volume vmdk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -143,9 +143,9 @@ def _from_openapi_data(cls, volume_path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. 
[optional] # noqa: E501 - storage_policy_id (str): Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.. [optional] # noqa: E501 - storage_policy_name (str): Storage Policy Based Management (SPBM) profile name.. [optional] # noqa: E501 + fs_type (str): fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + storage_policy_id (str): storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.. [optional] # noqa: E501 + storage_policy_name (str): storagePolicyName is the storage Policy Based Management (SPBM) profile name.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -198,7 +198,7 @@ def __init__(self, volume_path, *args, **kwargs): # noqa: E501 """VsphereVirtualDiskVolumeSource - a model defined in OpenAPI Args: - volume_path (str): Path that identifies vSphere volume vmdk + volume_path (str): volumePath is the path that identifies vSphere volume vmdk Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -231,9 +231,9 @@ def __init__(self, volume_path, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 - storage_policy_id (str): Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.. [optional] # noqa: E501 - storage_policy_name (str): Storage Policy Based Management (SPBM) profile name.. [optional] # noqa: E501 + fs_type (str): fsType is filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501 + storage_policy_id (str): storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.. [optional] # noqa: E501 + storage_policy_name (str): storagePolicyName is the storage Policy Based Management (SPBM) profile name.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/sdks/python/client/argo_workflows/models/__init__.py b/sdks/python/client/argo_workflows/models/__init__.py index 44ef0697df2c..fcce5f693e72 100644 --- a/sdks/python/client/argo_workflows/models/__init__.py +++ b/sdks/python/client/argo_workflows/models/__init__.py @@ -23,6 +23,7 @@ from argo_workflows.model.config_map_volume_source import ConfigMapVolumeSource from argo_workflows.model.container import Container from argo_workflows.model.container_port import ContainerPort +from argo_workflows.model.container_resize_policy import ContainerResizePolicy from argo_workflows.model.create_options import CreateOptions from argo_workflows.model.downward_api_projection import DownwardAPIProjection from argo_workflows.model.downward_api_volume_file import DownwardAPIVolumeFile @@ -367,6 +368,7 @@ from argo_workflows.model.projected_volume_source import ProjectedVolumeSource from argo_workflows.model.quobyte_volume_source import QuobyteVolumeSource from argo_workflows.model.rbd_volume_source import RBDVolumeSource +from argo_workflows.model.resource_claim import ResourceClaim from argo_workflows.model.resource_field_selector import ResourceFieldSelector from argo_workflows.model.resource_requirements import ResourceRequirements from argo_workflows.model.se_linux_options import SELinuxOptions @@ -396,6 +398,7 @@ from argo_workflows.model.tcp_socket_action import TCPSocketAction from argo_workflows.model.toleration import Toleration from 
argo_workflows.model.typed_local_object_reference import TypedLocalObjectReference +from argo_workflows.model.typed_object_reference import TypedObjectReference from argo_workflows.model.volume import Volume from argo_workflows.model.volume_device import VolumeDevice from argo_workflows.model.volume_mount import VolumeMount diff --git a/sdks/python/client/docs/AWSElasticBlockStoreVolumeSource.md b/sdks/python/client/docs/AWSElasticBlockStoreVolumeSource.md index aae0b49e9a51..b85e3f4b374a 100644 --- a/sdks/python/client/docs/AWSElasticBlockStoreVolumeSource.md +++ b/sdks/python/client/docs/AWSElasticBlockStoreVolumeSource.md @@ -5,10 +5,10 @@ Represents a Persistent Disk resource in AWS. An AWS EBS disk must exist before ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**volume_id** | **str** | Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | -**fs_type** | **str** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] -**partition** | **int** | The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). | [optional] -**read_only** | **bool** | Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] +**volume_id** | **str** | volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | +**fs_type** | **str** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] +**partition** | **int** | partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). | [optional] +**read_only** | **bool** | readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ArchivedWorkflowServiceApi.md b/sdks/python/client/docs/ArchivedWorkflowServiceApi.md index 228c380586b8..63735b50c32c 100644 --- a/sdks/python/client/docs/ArchivedWorkflowServiceApi.md +++ b/sdks/python/client/docs/ArchivedWorkflowServiceApi.md @@ -312,12 +312,13 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) namespace = "namespace_example" # str | (optional) # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_archived_workflow_label_values(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, namespace=namespace) + api_response = api_instance.list_archived_workflow_label_values(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events, namespace=namespace) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling ArchivedWorkflowServiceApi->list_archived_workflow_label_values: %s\n" % e) @@ -337,6 
+338,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] **namespace** | **str**| | [optional] ### Return type @@ -408,13 +410,14 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional (optional) name_prefix = "namePrefix_example" # str | (optional) namespace = "namespace_example" # str | (optional) # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_archived_workflows(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, name_prefix=name_prefix, namespace=namespace) + api_response = api_instance.list_archived_workflows(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events, name_prefix=name_prefix, namespace=namespace) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling ArchivedWorkflowServiceApi->list_archived_workflows: %s\n" % e) @@ -434,6 +437,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] **name_prefix** | **str**| | [optional] **namespace** | **str**| | [optional] diff --git a/sdks/python/client/docs/AzureDiskVolumeSource.md b/sdks/python/client/docs/AzureDiskVolumeSource.md index 08b52b45e73f..924e36c72d02 100644 --- a/sdks/python/client/docs/AzureDiskVolumeSource.md +++ b/sdks/python/client/docs/AzureDiskVolumeSource.md @@ -5,12 +5,12 @@ AzureDisk represents an Azure Data Disk mount on the host and bind mount to the ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**disk_name** | **str** | The Name of the data disk in the blob storage | -**disk_uri** | **str** | The URI the data disk in the blob storage | -**caching_mode** | **str** | Host Caching mode: None, Read Only, Read Write. | [optional] -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**kind** | **str** | Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared | [optional] -**read_only** | **bool** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**disk_name** | **str** | diskName is the Name of the data disk in the blob storage | +**disk_uri** | **str** | diskURI is the URI of data disk in the blob storage | +**caching_mode** | **str** | cachingMode is the Host Caching mode: None, Read Only, Read Write. | [optional] +**fs_type** | **str** | fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**kind** | **str** | kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared | [optional] +**read_only** | **bool** | readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/AzureFileVolumeSource.md b/sdks/python/client/docs/AzureFileVolumeSource.md index 2c799d85ab81..91f0dc247096 100644 --- a/sdks/python/client/docs/AzureFileVolumeSource.md +++ b/sdks/python/client/docs/AzureFileVolumeSource.md @@ -5,9 +5,9 @@ AzureFile represents an Azure File Service mount on the host and bind mount to t ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**secret_name** | **str** | the name of secret that contains Azure Storage Account Name and Key | -**share_name** | **str** | Share Name | -**read_only** | **bool** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**secret_name** | **str** | secretName is the name of secret that contains Azure Storage Account Name and Key | +**share_name** | **str** | shareName is the azure share Name | +**read_only** | **bool** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/CSIVolumeSource.md b/sdks/python/client/docs/CSIVolumeSource.md index ee914513c775..dbbc87bf1efc 100644 --- a/sdks/python/client/docs/CSIVolumeSource.md +++ b/sdks/python/client/docs/CSIVolumeSource.md @@ -5,11 +5,11 @@ Represents a source location of a volume to mount, managed by an external CSI dr ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**driver** | **str** | Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. | -**fs_type** | **str** | Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. | [optional] +**driver** | **str** | driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. | +**fs_type** | **str** | fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. | [optional] **node_publish_secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] -**read_only** | **bool** | Specifies a read-only configuration for the volume. Defaults to false (read/write). | [optional] -**volume_attributes** | **{str: (str,)}** | VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. 
| [optional] +**read_only** | **bool** | readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). | [optional] +**volume_attributes** | **{str: (str,)}** | volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/CephFSVolumeSource.md b/sdks/python/client/docs/CephFSVolumeSource.md index 30fc005fd126..8f2167650ba3 100644 --- a/sdks/python/client/docs/CephFSVolumeSource.md +++ b/sdks/python/client/docs/CephFSVolumeSource.md @@ -5,12 +5,12 @@ Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volum ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**monitors** | **[str]** | Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | -**path** | **str** | Optional: Used as the mounted root, rather than the full Ceph tree, default is / | [optional] -**read_only** | **bool** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] -**secret_file** | **str** | Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**monitors** | **[str]** | monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | +**path** | **str** | path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / | [optional] +**read_only** | **bool** | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**secret_file** | **str** | secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] -**user** | **str** | Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] +**user** | **str** | user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/CinderVolumeSource.md b/sdks/python/client/docs/CinderVolumeSource.md index 599e76f957b8..fbd17ad21cc3 100644 --- a/sdks/python/client/docs/CinderVolumeSource.md +++ 
b/sdks/python/client/docs/CinderVolumeSource.md @@ -5,9 +5,9 @@ Represents a cinder volume resource in Openstack. A Cinder volume must exist bef ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**volume_id** | **str** | volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] -**read_only** | **bool** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] +**volume_id** | **str** | volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] +**read_only** | **bool** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/ClusterWorkflowTemplateServiceApi.md b/sdks/python/client/docs/ClusterWorkflowTemplateServiceApi.md index 5f2d50121d95..0fb3eda22bf2 100644 --- a/sdks/python/client/docs/ClusterWorkflowTemplateServiceApi.md +++ b/sdks/python/client/docs/ClusterWorkflowTemplateServiceApi.md @@ -65,7 +65,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -114,7 +113,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -123,7 +122,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -139,7 +138,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -148,7 +147,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -972,6 +971,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + 
unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -1033,7 +1033,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -1078,7 +1078,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1087,7 +1087,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1103,7 +1103,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1112,7 +1112,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1538,7 +1538,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -1556,7 +1556,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1579,7 +1579,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( 
host="host_example", @@ -1608,7 +1608,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1627,7 +1627,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1651,7 +1651,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1663,7 +1663,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1695,7 +1706,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -1725,7 +1736,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1740,7 +1751,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -1831,7 +1842,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1854,7 +1865,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1883,7 +1894,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1902,7 +1913,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1926,7 +1937,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1938,7 +1949,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1970,7 +1992,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -2000,7 +2022,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, 
period_seconds=1, @@ -2996,7 +3018,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3019,7 +3041,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3048,7 +3070,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3068,7 +3090,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -3092,7 +3114,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3104,7 +3126,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -3136,7 +3169,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -3166,7 +3199,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4139,7 +4172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4162,7 +4195,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4191,7 +4224,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4210,7 +4243,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4234,7 +4267,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4246,7 +4279,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4278,7 +4322,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4309,7 +4353,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4358,7 +4402,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -4444,7 +4488,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4467,7 +4511,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4496,7 +4540,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4516,7 +4560,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4540,7 +4584,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4552,7 +4596,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4584,7 +4639,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( 
localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4614,7 +4669,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -5255,9 +5310,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -5354,7 +5409,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -5402,12 +5456,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -5645,7 +5705,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5654,7 +5714,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5670,7 +5730,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5679,7 +5739,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -6105,7 +6165,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -6123,7 +6183,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6146,7 +6206,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6175,7 +6235,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6194,7 +6254,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6218,7 +6278,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6230,7 +6290,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + 
resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6262,7 +6333,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6292,7 +6363,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6307,7 +6378,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -6398,7 +6469,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6421,7 +6492,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6450,7 +6521,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6469,7 +6540,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6493,7 +6564,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6505,7 +6576,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6537,7 +6619,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6567,7 +6649,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7563,7 +7645,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7586,7 +7668,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7615,7 +7697,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7635,7 +7717,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], 
readiness_probe=Probe( @@ -7659,7 +7741,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7671,7 +7753,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -7703,7 +7796,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -7733,7 +7826,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8706,7 +8799,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8729,7 +8822,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8758,7 +8851,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8777,7 +8870,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + 
protocol="protocol_example", ), ], readiness_probe=Probe( @@ -8801,7 +8894,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8813,7 +8906,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -8845,7 +8949,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -8876,7 +8980,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8925,7 +9029,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -9011,7 +9115,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9034,7 +9138,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9063,7 +9167,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9083,7 +9187,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -9107,7 +9211,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9119,7 +9223,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -9151,7 +9266,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -9181,7 +9296,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9822,9 +9937,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -9921,7 +10036,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", 
creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -9969,12 +10083,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10204,9 +10324,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -10227,7 +10347,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10275,12 +10394,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10323,10 +10448,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -10423,7 +10548,6 @@ 
with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10471,12 +10595,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10999,7 +11129,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -11048,7 +11177,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11057,7 +11186,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11073,7 +11202,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11082,7 +11211,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ 
"values_example", ], @@ -11906,6 +12035,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -11967,7 +12097,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -12012,7 +12142,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12021,7 +12151,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12037,7 +12167,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12046,7 +12176,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12472,7 +12602,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -12490,7 +12620,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12513,7 +12643,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12542,7 +12672,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12561,7 +12691,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12585,7 +12715,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12597,7 +12727,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12629,7 +12770,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -12659,7 +12800,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12674,7 +12815,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - 
termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -12765,7 +12906,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12788,7 +12929,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12817,7 +12958,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12836,7 +12977,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12860,7 +13001,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12872,7 +13013,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12904,7 +13056,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ 
-12934,7 +13086,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -13930,7 +14082,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -13953,7 +14105,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -13982,7 +14134,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14002,7 +14154,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -14026,7 +14178,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14038,7 +14190,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -14070,7 +14233,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( 
gmsa_credential_spec="gmsa_credential_spec_example", @@ -14100,7 +14263,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15073,7 +15236,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15096,7 +15259,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15125,7 +15288,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15144,7 +15307,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15168,7 +15331,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15180,7 +15343,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15212,7 +15386,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", 
), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15243,7 +15417,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15292,7 +15466,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -15378,7 +15552,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15401,7 +15575,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15430,7 +15604,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15450,7 +15624,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15474,7 +15648,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15486,7 +15660,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + 
ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15518,7 +15703,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15548,7 +15733,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -16189,9 +16374,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -16288,7 +16473,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -16336,12 +16520,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -16579,7 +16769,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16588,7 +16778,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16604,7 +16794,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16613,7 +16803,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17039,7 +17229,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -17057,7 +17247,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17080,7 +17270,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17109,7 +17299,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17128,7 +17318,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17152,7 +17342,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, 
@@ -17164,7 +17354,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17196,7 +17397,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17226,7 +17427,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17241,7 +17442,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -17332,7 +17533,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17355,7 +17556,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17384,7 +17585,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17403,7 +17604,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17427,7 +17628,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17439,7 +17640,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17471,7 +17683,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17501,7 +17713,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18497,7 +18709,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18520,7 +18732,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18549,7 +18761,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -18569,7 +18781,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -18593,7 +18805,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18605,7 +18817,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -18637,7 +18860,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -18667,7 +18890,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19640,7 +19863,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19663,7 +19886,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19692,7 +19915,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19711,7 +19934,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -19735,7 +19958,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19747,7 +19970,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -19779,7 +20013,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -19810,7 +20044,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19859,7 +20093,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -19945,7 +20179,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19968,7 +20202,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19997,7 +20231,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20017,7 +20251,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -20041,7 +20275,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20053,7 +20287,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -20085,7 +20330,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -20115,7 +20360,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20756,9 +21001,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", 
key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -20855,7 +21100,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -20903,12 +21147,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21138,9 +21388,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -21161,7 +21411,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21209,12 +21458,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21257,10 +21512,10 @@ with 
argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -21357,7 +21612,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21405,12 +21659,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21741,11 +22001,12 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_cluster_workflow_templates(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_cluster_workflow_templates(list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling ClusterWorkflowTemplateServiceApi->list_cluster_workflow_templates: %s\n" % e) @@ -21765,6 +22026,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -21837,7 +22099,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21886,7 +22147,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21895,7 +22156,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21911,7 +22172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21920,7 +22181,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22744,6 +23005,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -22805,7 +23067,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -22850,7 +23112,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22859,7 +23121,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22875,7 +23137,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22884,7 +23146,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23310,7 +23572,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -23328,7 +23590,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23351,7 +23613,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23380,7 +23642,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23399,7 +23661,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], 
readiness_probe=Probe( @@ -23423,7 +23685,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23435,7 +23697,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23467,7 +23740,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -23497,7 +23770,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23512,7 +23785,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -23603,7 +23876,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23626,7 +23899,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23655,7 +23928,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23674,7 +23947,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -23698,7 +23971,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23710,7 +23983,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23742,7 +24026,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -23772,7 +24056,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24768,7 +25052,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24791,7 +25075,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( 
host="host_example", @@ -24820,7 +25104,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24840,7 +25124,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -24864,7 +25148,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24876,7 +25160,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -24908,7 +25203,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -24938,7 +25233,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25911,7 +26206,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25934,7 +26229,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25963,7 +26258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25982,7 +26277,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26006,7 +26301,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26018,7 +26313,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26050,7 +26356,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26081,7 +26387,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26130,7 +26436,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -26216,7 +26522,7 @@ with argo_workflows.ApiClient(configuration) as api_client: 
], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26239,7 +26545,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26268,7 +26574,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26288,7 +26594,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26312,7 +26618,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26324,7 +26630,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26356,7 +26673,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26386,7 +26703,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -27027,9 +27344,9 @@ with 
argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -27126,7 +27443,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -27174,12 +27490,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -27417,7 +27739,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27426,7 +27748,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27442,7 +27764,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27451,7 +27773,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27877,7 
+28199,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -27895,7 +28217,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -27918,7 +28240,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -27947,7 +28269,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -27966,7 +28288,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -27990,7 +28312,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28002,7 +28324,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28034,7 +28367,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), 
windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28064,7 +28397,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28079,7 +28412,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -28170,7 +28503,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28193,7 +28526,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28222,7 +28555,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28241,7 +28574,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -28265,7 +28598,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28277,7 +28610,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + 
restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28309,7 +28653,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28339,7 +28683,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29335,7 +29679,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29358,7 +29702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29387,7 +29731,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29407,7 +29751,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -29431,7 +29775,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29443,7 +29787,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + 
ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -29475,7 +29830,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -29505,7 +29860,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30478,7 +30833,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30501,7 +30856,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30530,7 +30885,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30549,7 +30904,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30573,7 +30928,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30585,7 +30940,18 @@ with argo_workflows.ApiClient(configuration) as api_client: 
termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -30617,7 +30983,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -30648,7 +31014,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30697,7 +31063,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -30783,7 +31149,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30806,7 +31172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30835,7 +31201,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30855,7 +31221,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30879,7 
+31245,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30891,7 +31257,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -30923,7 +31300,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -30953,7 +31330,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31594,9 +31971,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -31693,7 +32070,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -31741,12 +32117,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( 
api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -31976,9 +32358,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -31999,7 +32381,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32047,12 +32428,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -32095,10 +32482,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -32195,7 +32582,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32243,12 +32629,18 @@ with argo_workflows.ApiClient(configuration) as 
api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, diff --git a/sdks/python/client/docs/ConfigMapProjection.md b/sdks/python/client/docs/ConfigMapProjection.md index 458d5ac2c188..6c36fdf3d0ed 100644 --- a/sdks/python/client/docs/ConfigMapProjection.md +++ b/sdks/python/client/docs/ConfigMapProjection.md @@ -5,9 +5,9 @@ Adapts a ConfigMap into a projected volume. The contents of the target ConfigMa ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**items** | [**[KeyToPath]**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**items** | [**[KeyToPath]**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **str** | Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **bool** | Specify whether the ConfigMap or its keys must be defined | [optional] +**optional** | **bool** | optional specify whether the ConfigMap or its keys must be defined | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ConfigMapVolumeSource.md b/sdks/python/client/docs/ConfigMapVolumeSource.md index d4fa4742a171..5d9b3c76dc32 100644 --- a/sdks/python/client/docs/ConfigMapVolumeSource.md +++ b/sdks/python/client/docs/ConfigMapVolumeSource.md @@ -5,10 +5,10 @@ Adapts a ConfigMap into a volume. The contents of the target ConfigMap's Data f ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**default_mode** | **int** | Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**items** | [**[KeyToPath]**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**default_mode** | **int** | defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**items** | [**[KeyToPath]**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **str** | Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **bool** | Specify whether the ConfigMap or its keys must be defined | [optional] +**optional** | **bool** | optional specify whether the ConfigMap or its keys must be defined | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/Container.md b/sdks/python/client/docs/Container.md index 6c70e01262fc..1658ea9355dd 100644 --- a/sdks/python/client/docs/Container.md +++ b/sdks/python/client/docs/Container.md @@ -5,24 +5,25 @@ A single application container that you want to run within a pod. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**image** | **str** | Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. | -**args** | **[str]** | Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | [optional] -**command** | **[str]** | Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | [optional] +**image** | **str** | Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. | +**args** | **[str]** | Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | [optional] +**command** | **[str]** | Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell | [optional] **env** | [**[EnvVar]**](EnvVar.md) | List of environment variables to set in the container. Cannot be updated. | [optional] **env_from** | [**[EnvFromSource]**](EnvFromSource.md) | List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. | [optional] -**image_pull_policy** | **str** | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images Possible enum values: - `\"Always\"` means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - `\"IfNotPresent\"` means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - `\"Never\"` means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present | [optional] +**image_pull_policy** | **str** | Image pull policy. One of Always, Never, IfNotPresent. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images | [optional] **lifecycle** | [**Lifecycle**](Lifecycle.md) | | [optional] **liveness_probe** | [**Probe**](Probe.md) | | [optional] **name** | **str** | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | [optional] -**ports** | [**[ContainerPort]**](ContainerPort.md) | List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. | [optional] +**ports** | [**[ContainerPort]**](ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. | [optional] **readiness_probe** | [**Probe**](Probe.md) | | [optional] +**resize_policy** | [**[ContainerResizePolicy]**](ContainerResizePolicy.md) | Resources resize policy for the container. | [optional] **resources** | [**ResourceRequirements**](ResourceRequirements.md) | | [optional] **security_context** | [**SecurityContext**](SecurityContext.md) | | [optional] **startup_probe** | [**Probe**](Probe.md) | | [optional] **stdin** | **bool** | Whether this container should allocate a buffer for stdin in the container runtime. 
If this is not set, reads from stdin in the container will always result in EOF. Default is false. | [optional] **stdin_once** | **bool** | Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false | [optional] **termination_message_path** | **str** | Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. | [optional] -**termination_message_policy** | **str** | Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. Possible enum values: - `\"FallbackToLogsOnError\"` will read the most recent contents of the container logs for the container status message when the container exits with an error and the terminationMessagePath has no contents. 
- `\"File\"` is the default behavior and will set the container status message to the contents of the container's terminationMessagePath when the container exits. | [optional] +**termination_message_policy** | **str** | Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. | [optional] **tty** | **bool** | Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. | [optional] **volume_devices** | [**[VolumeDevice]**](VolumeDevice.md) | volumeDevices is the list of block devices to be used by the container. | [optional] **volume_mounts** | [**[VolumeMount]**](VolumeMount.md) | Pod volumes to mount into the container's filesystem. Cannot be updated. | [optional] diff --git a/sdks/python/client/docs/ContainerPort.md b/sdks/python/client/docs/ContainerPort.md index 36dfc1b46bf9..5d6b2bd44f76 100644 --- a/sdks/python/client/docs/ContainerPort.md +++ b/sdks/python/client/docs/ContainerPort.md @@ -9,7 +9,7 @@ Name | Type | Description | Notes **host_ip** | **str** | What host IP to bind the external port to. | [optional] **host_port** | **int** | Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. | [optional] **name** | **str** | If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. | [optional] -**protocol** | **str** | Protocol for port. Must be UDP, TCP, or SCTP. 
Defaults to \"TCP\". Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol. | [optional] +**protocol** | **str** | Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\". | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ContainerResizePolicy.md b/sdks/python/client/docs/ContainerResizePolicy.md new file mode 100644 index 000000000000..de631cfc8373 --- /dev/null +++ b/sdks/python/client/docs/ContainerResizePolicy.md @@ -0,0 +1,14 @@ +# ContainerResizePolicy + +ContainerResizePolicy represents resource resize policy for the container. + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**resource_name** | **str** | Name of the resource to which this resource resize policy applies. Supported values: cpu, memory. | +**restart_policy** | **str** | Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. 
| +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/CronWorkflowServiceApi.md b/sdks/python/client/docs/CronWorkflowServiceApi.md index 455b2749c5cd..6d18ba5c635b 100644 --- a/sdks/python/client/docs/CronWorkflowServiceApi.md +++ b/sdks/python/client/docs/CronWorkflowServiceApi.md @@ -68,7 +68,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -125,7 +124,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -174,7 +172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -183,7 +181,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -199,7 +197,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -208,7 +206,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1032,6 +1030,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -1093,7 +1092,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -1138,7 +1137,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1147,7 +1146,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1163,7 +1162,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1172,7 +1171,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1598,7 +1597,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -1616,7 +1615,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1639,7 +1638,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1668,7 +1667,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1687,7 +1686,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1711,7 +1710,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1723,7 +1722,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1755,7 +1765,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -1785,7 +1795,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1800,7 +1810,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, 
termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -1891,7 +1901,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1914,7 +1924,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1943,7 +1953,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1962,7 +1972,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1986,7 +1996,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1998,7 +2008,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -2030,7 +2051,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( 
gmsa_credential_spec="gmsa_credential_spec_example", @@ -2060,7 +2081,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3056,7 +3077,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3079,7 +3100,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3108,7 +3129,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3128,7 +3149,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -3152,7 +3173,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3164,7 +3185,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -3196,7 +3228,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), 
windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -3226,7 +3258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4199,7 +4231,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4222,7 +4254,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4251,7 +4283,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4270,7 +4302,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4294,7 +4326,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4306,7 +4338,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4338,7 +4381,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - 
type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4369,7 +4412,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4418,7 +4461,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -4504,7 +4547,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4527,7 +4570,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4556,7 +4599,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4576,7 +4619,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4600,7 +4643,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4612,7 +4655,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], 
resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4644,7 +4698,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4674,7 +4728,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -5315,9 +5369,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -5414,7 +5468,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -5462,12 +5515,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -5705,7 +5764,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5714,7 +5773,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5730,7 +5789,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5739,7 +5798,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -6165,7 +6224,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -6183,7 +6242,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6206,7 +6265,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6235,7 +6294,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6254,7 +6313,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6278,7 +6337,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -6290,7 +6349,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6322,7 +6392,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6352,7 +6422,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6367,7 +6437,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -6458,7 +6528,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6481,7 +6551,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6510,7 +6580,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6529,7 +6599,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6553,7 +6623,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6565,7 +6635,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6597,7 +6678,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6627,7 +6708,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7623,7 +7704,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7646,7 +7727,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7675,7 +7756,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, 
period_seconds=1, @@ -7695,7 +7776,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -7719,7 +7800,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7731,7 +7812,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -7763,7 +7855,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -7793,7 +7885,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8766,7 +8858,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8789,7 +8881,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8818,7 +8910,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8837,7 +8929,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -8861,7 +8953,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8873,7 +8965,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -8905,7 +9008,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -8936,7 +9039,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8985,7 +9088,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -9071,7 +9174,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9094,7 +9197,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9123,7 +9226,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9143,7 +9246,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -9167,7 +9270,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9179,7 +9282,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -9211,7 +9325,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -9241,7 +9355,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9882,9 +9996,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", 
toleration_seconds=1, value="value_example", ), @@ -9981,7 +10095,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10029,12 +10142,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10264,9 +10383,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -10287,7 +10406,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10335,12 +10453,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10383,10 +10507,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", 
reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -10483,7 +10607,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10531,12 +10654,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -11084,7 +11213,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -11141,7 +11269,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -11190,7 +11317,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11199,7 +11326,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - 
operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11215,7 +11342,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11224,7 +11351,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12048,6 +12175,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -12109,7 +12237,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -12154,7 +12282,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12163,7 +12291,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12179,7 +12307,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12188,7 +12316,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12614,7 
+12742,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -12632,7 +12760,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12655,7 +12783,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12684,7 +12812,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12703,7 +12831,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12727,7 +12855,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12739,7 +12867,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12771,7 +12910,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), 
windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -12801,7 +12940,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12816,7 +12955,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -12907,7 +13046,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12930,7 +13069,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12959,7 +13098,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12978,7 +13117,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -13002,7 +13141,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -13014,7 +13153,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + 
restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -13046,7 +13196,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -13076,7 +13226,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14072,7 +14222,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -14095,7 +14245,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -14124,7 +14274,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14144,7 +14294,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -14168,7 +14318,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14180,7 +14330,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + 
ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -14212,7 +14373,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -14242,7 +14403,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15215,7 +15376,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15238,7 +15399,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15267,7 +15428,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15286,7 +15447,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15310,7 +15471,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15322,7 +15483,18 @@ with argo_workflows.ApiClient(configuration) as api_client: 
termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15354,7 +15526,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15385,7 +15557,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15434,7 +15606,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -15520,7 +15692,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15543,7 +15715,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15572,7 +15744,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15592,7 +15764,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15616,7 
+15788,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15628,7 +15800,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15660,7 +15843,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15690,7 +15873,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -16331,9 +16514,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -16430,7 +16613,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -16478,12 +16660,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( 
api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -16721,7 +16909,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16730,7 +16918,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16746,7 +16934,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16755,7 +16943,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17181,7 +17369,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -17199,7 +17387,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17222,7 +17410,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17251,7 +17439,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17270,7 +17458,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17294,7 +17482,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17306,7 +17494,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17338,7 +17537,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17368,7 +17567,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17383,7 +17582,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -17474,7 +17673,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17497,7 +17696,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17526,7 +17725,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17545,7 +17744,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17569,7 +17768,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17581,7 +17780,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17613,7 +17823,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17643,7 +17853,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18639,7 +18849,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18662,7 +18872,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18691,7 +18901,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18711,7 +18921,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -18735,7 +18945,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18747,7 +18957,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -18779,7 +19000,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -18809,7 +19030,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19782,7 +20003,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19805,7 +20026,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19834,7 +20055,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19853,7 +20074,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -19877,7 +20098,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19889,7 +20110,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -19921,7 +20153,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -19952,7 +20184,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -20001,7 +20233,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -20087,7 +20319,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -20110,7 +20342,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -20139,7 +20371,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20159,7 +20391,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -20183,7 +20415,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20195,7 +20427,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -20227,7 +20470,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + 
type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -20257,7 +20500,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20898,9 +21141,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -20997,7 +21240,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21045,12 +21287,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21280,9 +21528,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -21303,7 +21551,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, 
deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21351,12 +21598,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21399,10 +21652,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -21499,7 +21752,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21547,12 +21799,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21911,6 +22169,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. 
(optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -21922,7 +22181,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_cron_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_cron_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling 
CronWorkflowServiceApi->list_cron_workflows: %s\n" % e) @@ -21943,6 +22202,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -22185,7 +22445,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -22242,7 +22501,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -22291,7 +22549,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22300,7 +22558,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22316,7 +22574,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( 
key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22325,7 +22583,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23149,6 +23407,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -23210,7 +23469,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -23255,7 +23514,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23264,7 +23523,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23280,7 +23539,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23289,7 +23548,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23715,7 +23974,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( 
post_start=LifecycleHandler( _exec=ExecAction( @@ -23733,7 +23992,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23756,7 +24015,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23785,7 +24044,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23804,7 +24063,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -23828,7 +24087,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23840,7 +24099,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23872,7 +24142,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -23902,7 +24172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23917,7 +24187,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -24008,7 +24278,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24031,7 +24301,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24060,7 +24330,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24079,7 +24349,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -24103,7 +24373,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24115,7 +24385,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -24147,7 +24428,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -24177,7 +24458,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25173,7 +25454,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25196,7 +25477,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25225,7 +25506,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25245,7 +25526,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -25269,7 +25550,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25281,7 +25562,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ 
"key": "key_example", }, @@ -25313,7 +25605,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -25343,7 +25635,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26316,7 +26608,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26339,7 +26631,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26368,7 +26660,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26387,7 +26679,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26411,7 +26703,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26423,7 +26715,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + 
ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26455,7 +26758,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26486,7 +26789,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26535,7 +26838,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -26621,7 +26924,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26644,7 +26947,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26673,7 +26976,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26693,7 +26996,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26717,7 +27020,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26729,7 +27032,18 @@ with 
argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26761,7 +27075,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26791,7 +27105,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -27432,9 +27746,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -27531,7 +27845,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -27579,12 +27892,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -27822,7 
+28141,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27831,7 +28150,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27847,7 +28166,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27856,7 +28175,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -28282,7 +28601,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -28300,7 +28619,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28323,7 +28642,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28352,7 +28671,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28371,7 +28690,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - 
protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -28395,7 +28714,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28407,7 +28726,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28439,7 +28769,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28469,7 +28799,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28484,7 +28814,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -28575,7 +28905,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28598,7 +28928,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( 
host="host_example", @@ -28627,7 +28957,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28646,7 +28976,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -28670,7 +29000,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28682,7 +29012,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28714,7 +29055,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28744,7 +29085,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29740,7 +30081,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29763,7 +30104,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29792,7 +30133,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29812,7 +30153,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -29836,7 +30177,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29848,7 +30189,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -29880,7 +30232,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -29910,7 +30262,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30883,7 +31235,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30906,7 +31258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30935,7 +31287,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30954,7 +31306,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30978,7 +31330,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30990,7 +31342,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -31022,7 +31385,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -31053,7 +31416,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31102,7 +31465,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -31188,7 +31551,7 @@ 
with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -31211,7 +31574,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -31240,7 +31603,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31260,7 +31623,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -31284,7 +31647,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31296,7 +31659,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -31328,7 +31702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -31358,7 +31732,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -31999,9 +32373,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -32098,7 +32472,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32146,12 +32519,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -32381,9 +32760,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -32404,7 +32783,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32452,12 +32830,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", 
name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -32500,10 +32884,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -32600,7 +32984,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32648,12 +33031,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, diff --git a/sdks/python/client/docs/EmptyDirVolumeSource.md b/sdks/python/client/docs/EmptyDirVolumeSource.md index 345366e016d5..8d41c9c4600b 100644 --- a/sdks/python/client/docs/EmptyDirVolumeSource.md +++ b/sdks/python/client/docs/EmptyDirVolumeSource.md @@ -5,8 +5,8 @@ Represents an empty directory for a pod. Empty directory volumes support ownersh ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**medium** | **str** | What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir | [optional] -**size_limit** | **str** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. 
Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. | [optional] +**medium** | **str** | medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir | [optional] +**size_limit** | **str** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/EventServiceApi.md b/sdks/python/client/docs/EventServiceApi.md index f013152208e0..e6cd001e299b 100644 --- a/sdks/python/client/docs/EventServiceApi.md +++ b/sdks/python/client/docs/EventServiceApi.md @@ -55,6 +55,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional (optional) # example passing only required values which don't have defaults set try: @@ -66,7 +67,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_workflow_event_bindings(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_workflow_event_bindings(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling EventServiceApi->list_workflow_event_bindings: %s\n" % e) @@ -87,6 +88,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/python/client/docs/EventSourceServiceApi.md b/sdks/python/client/docs/EventSourceServiceApi.md index 208e197b29ac..a2b0f6c02140 100644 --- a/sdks/python/client/docs/EventSourceServiceApi.md +++ b/sdks/python/client/docs/EventSourceServiceApi.md @@ -58,7 +58,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -1256,7 +1255,7 @@ with argo_workflows.ApiClient(configuration) as api_client: name="name_example", node_port=1, port=1, - protocol="SCTP", + protocol="protocol_example", target_port="target_port_example", ), ], @@ -1524,7 +1523,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1533,7 +1532,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1549,7 +1548,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1558,7 +1557,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1771,7 +1770,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", 
- image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -1789,7 +1788,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1812,7 +1811,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1841,7 +1840,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1860,7 +1859,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1884,7 +1883,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1896,7 +1895,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1928,7 +1938,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -1958,7 +1968,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1973,7 +1983,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -2025,7 +2035,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -2046,9 +2056,9 @@ with argo_workflows.ApiClient(configuration) as api_client: service_account_name="service_account_name_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -2145,7 +2155,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -2193,12 +2202,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -2853,6 +2868,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = 
"listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -2864,7 +2880,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_event_sources(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_event_sources(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling 
EventSourceServiceApi->list_event_sources: %s\n" % e) @@ -2885,6 +2901,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -2955,7 +2972,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -4153,7 +4169,7 @@ with argo_workflows.ApiClient(configuration) as api_client: name="name_example", node_port=1, port=1, - protocol="SCTP", + protocol="protocol_example", target_port="target_port_example", ), ], @@ -4421,7 +4437,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -4430,7 +4446,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -4446,7 +4462,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -4455,7 +4471,7 
@@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -4668,7 +4684,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -4686,7 +4702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4709,7 +4725,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4738,7 +4754,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4757,7 +4773,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4781,7 +4797,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4793,7 +4809,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4825,7 +4852,7 @@ 
with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4855,7 +4882,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4870,7 +4897,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -4922,7 +4949,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -4943,9 +4970,9 @@ with argo_workflows.ApiClient(configuration) as api_client: service_account_name="service_account_name_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -5042,7 +5069,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -5090,12 +5116,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", 
name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -5460,6 +5492,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
(optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. 
When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -5471,7 +5504,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.watch_event_sources(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.watch_event_sources(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, 
list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling EventSourceServiceApi->watch_event_sources: %s\n" % e) @@ -5492,6 +5525,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. 
Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/python/client/docs/FCVolumeSource.md b/sdks/python/client/docs/FCVolumeSource.md index 1e43db4e774d..fde52537d8b7 100644 --- a/sdks/python/client/docs/FCVolumeSource.md +++ b/sdks/python/client/docs/FCVolumeSource.md @@ -5,11 +5,11 @@ Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**lun** | **int** | Optional: FC target lun number | [optional] -**read_only** | **bool** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
| [optional] -**target_wwns** | **[str]** | Optional: FC target worldwide names (WWNs) | [optional] -**wwids** | **[str]** | Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. | [optional] +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**lun** | **int** | lun is Optional: FC target lun number | [optional] +**read_only** | **bool** | readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**target_wwns** | **[str]** | targetWWNs is Optional: FC target worldwide names (WWNs) | [optional] +**wwids** | **[str]** | wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/FlexVolumeSource.md b/sdks/python/client/docs/FlexVolumeSource.md index 8d4b2213a2dd..a45474644e73 100644 --- a/sdks/python/client/docs/FlexVolumeSource.md +++ b/sdks/python/client/docs/FlexVolumeSource.md @@ -5,10 +5,10 @@ FlexVolume represents a generic volume resource that is provisioned/attached usi ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**driver** | **str** | Driver is the name of the driver to use for this volume. | -**fs_type** | **str** | Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. | [optional] -**options** | **{str: (str,)}** | Optional: Extra command options if any. | [optional] -**read_only** | **bool** | Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**driver** | **str** | driver is the name of the driver to use for this volume. | +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script. | [optional] +**options** | **{str: (str,)}** | options is Optional: this field holds extra command options if any. | [optional] +**read_only** | **bool** | readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/FlockerVolumeSource.md b/sdks/python/client/docs/FlockerVolumeSource.md index 8b8e02f2ab87..16288298390f 100644 --- a/sdks/python/client/docs/FlockerVolumeSource.md +++ b/sdks/python/client/docs/FlockerVolumeSource.md @@ -5,8 +5,8 @@ Represents a Flocker volume mounted by the Flocker agent. One and only one of da ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**dataset_name** | **str** | Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated | [optional] -**dataset_uuid** | **str** | UUID of the dataset. 
This is unique identifier of a Flocker dataset | [optional] +**dataset_name** | **str** | datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated | [optional] +**dataset_uuid** | **str** | datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/GCEPersistentDiskVolumeSource.md b/sdks/python/client/docs/GCEPersistentDiskVolumeSource.md index 4799162ea133..74bd68273a6a 100644 --- a/sdks/python/client/docs/GCEPersistentDiskVolumeSource.md +++ b/sdks/python/client/docs/GCEPersistentDiskVolumeSource.md @@ -5,10 +5,10 @@ Represents a Persistent Disk resource in Google Compute Engine. A GCE PD must e ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**pd_name** | **str** | Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | -**fs_type** | **str** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] -**partition** | **int** | The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] -**read_only** | **bool** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**pd_name** | **str** | pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | +**fs_type** | **str** | fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**partition** | **int** | partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] +**read_only** | **bool** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/GitRepoVolumeSource.md b/sdks/python/client/docs/GitRepoVolumeSource.md index 4574d2b428d2..31d98a2fc993 100644 --- a/sdks/python/client/docs/GitRepoVolumeSource.md +++ b/sdks/python/client/docs/GitRepoVolumeSource.md @@ -5,9 +5,9 @@ Represents a volume that is populated with the contents of a git repository. Git ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**repository** | **str** | Repository URL | -**directory** | **str** | Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. | [optional] -**revision** | **str** | Commit hash for the specified revision. | [optional] +**repository** | **str** | repository is the URL | +**directory** | **str** | directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. | [optional] +**revision** | **str** | revision is the commit hash for the specified revision. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/GlusterfsVolumeSource.md b/sdks/python/client/docs/GlusterfsVolumeSource.md index 6b41f3f47039..a6c0801f32e9 100644 --- a/sdks/python/client/docs/GlusterfsVolumeSource.md +++ b/sdks/python/client/docs/GlusterfsVolumeSource.md @@ -5,9 +5,9 @@ Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**endpoints** | **str** | EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | -**path** | **str** | Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | -**read_only** | **bool** | ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | [optional] +**endpoints** | **str** | endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | +**path** | **str** | path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | +**read_only** | **bool** | readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/HTTPGetAction.md b/sdks/python/client/docs/HTTPGetAction.md index cbf0219232f2..2de001c3c28c 100644 --- a/sdks/python/client/docs/HTTPGetAction.md +++ b/sdks/python/client/docs/HTTPGetAction.md @@ -9,7 +9,7 @@ Name | Type | Description | Notes **host** | **str** | Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead. | [optional] **http_headers** | [**[HTTPHeader]**](HTTPHeader.md) | Custom headers to set in the request. HTTP allows repeated headers. | [optional] **path** | **str** | Path to access on the HTTP server. | [optional] -**scheme** | **str** | Scheme to use for connecting to the host. Defaults to HTTP. Possible enum values: - `\"HTTP\"` means that the scheme used will be http:// - `\"HTTPS\"` means that the scheme used will be https:// | [optional] +**scheme** | **str** | Scheme to use for connecting to the host. Defaults to HTTP. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/HTTPHeader.md b/sdks/python/client/docs/HTTPHeader.md index c25100fd0554..22ccb659e508 100644 --- a/sdks/python/client/docs/HTTPHeader.md +++ b/sdks/python/client/docs/HTTPHeader.md @@ -5,7 +5,7 @@ HTTPHeader describes a custom header to be used in HTTP probes ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **str** | The header field name | +**name** | **str** | The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. | **value** | **str** | The header field value | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/HostPathVolumeSource.md b/sdks/python/client/docs/HostPathVolumeSource.md index faf829cccfd7..6abd25a8ecf3 100644 --- a/sdks/python/client/docs/HostPathVolumeSource.md +++ b/sdks/python/client/docs/HostPathVolumeSource.md @@ -5,8 +5,8 @@ Represents a host path mapped into a pod. Host path volumes do not support owner ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**path** | **str** | Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | -**type** | **str** | Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | [optional] +**path** | **str** | path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | +**type** | **str** | type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ISCSIVolumeSource.md b/sdks/python/client/docs/ISCSIVolumeSource.md index a3102c67c3d4..5d13c1d38ae1 100644 --- a/sdks/python/client/docs/ISCSIVolumeSource.md +++ b/sdks/python/client/docs/ISCSIVolumeSource.md @@ -5,16 +5,16 @@ Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**iqn** | **str** | Target iSCSI Qualified Name. | -**lun** | **int** | iSCSI Target Lun number. | -**target_portal** | **str** | iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | -**chap_auth_discovery** | **bool** | whether support iSCSI Discovery CHAP authentication | [optional] -**chap_auth_session** | **bool** | whether support iSCSI Session CHAP authentication | [optional] -**fs_type** | **str** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi | [optional] -**initiator_name** | **str** | Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. | [optional] -**iscsi_interface** | **str** | iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). | [optional] -**portals** | **[str]** | iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | [optional] -**read_only** | **bool** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. | [optional] +**iqn** | **str** | iqn is the target iSCSI Qualified Name. | +**lun** | **int** | lun represents iSCSI Target Lun number. | +**target_portal** | **str** | targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | +**chap_auth_discovery** | **bool** | chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication | [optional] +**chap_auth_session** | **bool** | chapAuthSession defines whether support iSCSI Session CHAP authentication | [optional] +**fs_type** | **str** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi | [optional] +**initiator_name** | **str** | initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection. 
| [optional] +**iscsi_interface** | **str** | iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). | [optional] +**portals** | **[str]** | portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). | [optional] +**read_only** | **bool** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md index 133c91613716..3bebfe41763f 100644 --- a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md +++ b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ContainerNode.md @@ -16,6 +16,7 @@ Name | Type | Description | Notes **liveness_probe** | [**Probe**](Probe.md) | | [optional] **ports** | [**[ContainerPort]**](ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. | [optional] **readiness_probe** | [**Probe**](Probe.md) | | [optional] +**resize_policy** | [**[ContainerResizePolicy]**](ContainerResizePolicy.md) | Resources resize policy for the container. 
| [optional] **resources** | [**ResourceRequirements**](ResourceRequirements.md) | | [optional] **security_context** | [**SecurityContext**](SecurityContext.md) | | [optional] **startup_probe** | [**Probe**](Probe.md) | | [optional] diff --git a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md index 9a96612ef976..8cc3b820c4f3 100644 --- a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md +++ b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1ScriptTemplate.md @@ -17,6 +17,7 @@ Name | Type | Description | Notes **name** | **str** | Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. | [optional] **ports** | [**[ContainerPort]**](ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. | [optional] **readiness_probe** | [**Probe**](Probe.md) | | [optional] +**resize_policy** | [**[ContainerResizePolicy]**](ContainerResizePolicy.md) | Resources resize policy for the container. 
| [optional] **resources** | [**ResourceRequirements**](ResourceRequirements.md) | | [optional] **security_context** | [**SecurityContext**](SecurityContext.md) | | [optional] **startup_probe** | [**Probe**](Probe.md) | | [optional] diff --git a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md index 2b2147e7329b..bbc550c214d5 100644 --- a/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md +++ b/sdks/python/client/docs/IoArgoprojWorkflowV1alpha1UserContainer.md @@ -17,6 +17,7 @@ Name | Type | Description | Notes **mirror_volume_mounts** | **bool** | MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding | [optional] **ports** | [**[ContainerPort]**](ContainerPort.md) | List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. | [optional] **readiness_probe** | [**Probe**](Probe.md) | | [optional] +**resize_policy** | [**[ContainerResizePolicy]**](ContainerResizePolicy.md) | Resources resize policy for the container. 
| [optional] **resources** | [**ResourceRequirements**](ResourceRequirements.md) | | [optional] **security_context** | [**SecurityContext**](SecurityContext.md) | | [optional] **startup_probe** | [**Probe**](Probe.md) | | [optional] diff --git a/sdks/python/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md b/sdks/python/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md index 07dbace72e08..447516847841 100644 --- a/sdks/python/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md +++ b/sdks/python/client/docs/IoK8sApiPolicyV1PodDisruptionBudgetSpec.md @@ -8,6 +8,7 @@ Name | Type | Description | Notes **max_unavailable** | **str** | | [optional] **min_available** | **str** | | [optional] **selector** | [**LabelSelector**](LabelSelector.md) | | [optional] +**unhealthy_pod_eviction_policy** | **str** | UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\". Valid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy. IfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction. AlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction. Additional policies may be added in the future. 
Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field. This field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/KeyToPath.md b/sdks/python/client/docs/KeyToPath.md index 1e57cc00ab57..5102f82ccbea 100644 --- a/sdks/python/client/docs/KeyToPath.md +++ b/sdks/python/client/docs/KeyToPath.md @@ -5,9 +5,9 @@ Maps a string key to a path within a volume. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**key** | **str** | The key to project. | -**path** | **str** | The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | -**mode** | **int** | Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**key** | **str** | key is the key to project. | +**path** | **str** | path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. | +**mode** | **int** | mode is Optional: mode bits used to set permissions on this file. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ListMeta.md b/sdks/python/client/docs/ListMeta.md index 6c7357a98e72..82a4d103cffc 100644 --- a/sdks/python/client/docs/ListMeta.md +++ b/sdks/python/client/docs/ListMeta.md @@ -8,7 +8,7 @@ Name | Type | Description | Notes **_continue** | **str** | continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. | [optional] **remaining_item_count** | **int** | remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. 
If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. | [optional] **resource_version** | **str** | String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency | [optional] -**self_link** | **str** | selfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release. | [optional] +**self_link** | **str** | Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/NFSVolumeSource.md b/sdks/python/client/docs/NFSVolumeSource.md index a514712bef47..aaad8b2f8927 100644 --- a/sdks/python/client/docs/NFSVolumeSource.md +++ b/sdks/python/client/docs/NFSVolumeSource.md @@ -5,9 +5,9 @@ Represents an NFS mount that lasts the lifetime of a pod. 
NFS volumes do not sup ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**path** | **str** | Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | -**server** | **str** | Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | -**read_only** | **bool** | ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | [optional] +**path** | **str** | path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | +**server** | **str** | server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | +**read_only** | **bool** | readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/NodeSelectorRequirement.md b/sdks/python/client/docs/NodeSelectorRequirement.md index 7cc72f18cb0a..41665799704a 100644 --- a/sdks/python/client/docs/NodeSelectorRequirement.md +++ b/sdks/python/client/docs/NodeSelectorRequirement.md @@ -6,7 +6,7 @@ A node selector requirement is a selector that contains values, a key, and an op Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **key** | **str** | The label key that the selector applies to. 
| -**operator** | **str** | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. Possible enum values: - `\"DoesNotExist\"` - `\"Exists\"` - `\"Gt\"` - `\"In\"` - `\"Lt\"` - `\"NotIn\"` | +**operator** | **str** | Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. | **values** | **[str]** | An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/ObjectMeta.md b/sdks/python/client/docs/ObjectMeta.md index bd09367bbde5..0c646e77f7e1 100644 --- a/sdks/python/client/docs/ObjectMeta.md +++ b/sdks/python/client/docs/ObjectMeta.md @@ -5,22 +5,21 @@ ObjectMeta is metadata that all persisted resources must have, which includes al ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**annotations** | **{str: (str,)}** | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations | [optional] -**cluster_name** | **str** | The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
| [optional] +**annotations** | **{str: (str,)}** | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations | [optional] **creation_timestamp** | **datetime** | Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. | [optional] **deletion_grace_period_seconds** | **int** | Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. | [optional] **deletion_timestamp** | **datetime** | Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. | [optional] **finalizers** | **[str]** | Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. 
| [optional] -**generate_name** | **str** | GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency | [optional] +**generate_name** | **str** | GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will return a 409. Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency | [optional] **generation** | **int** | A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. | [optional] -**labels** | **{str: (str,)}** | Map of string keys and values that can be used to organize and categorize (scope and select) objects. 
May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels | [optional] +**labels** | **{str: (str,)}** | Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels | [optional] **managed_fields** | [**[ManagedFieldsEntry]**](ManagedFieldsEntry.md) | ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. | [optional] -**name** | **str** | Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names | [optional] -**namespace** | **str** | Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces | [optional] +**name** | **str** | Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names | [optional] +**namespace** | **str** | Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces | [optional] **owner_references** | [**[OwnerReference]**](OwnerReference.md) | List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. | [optional] **resource_version** | **str** | An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency | [optional] -**self_link** | **str** | SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release. 
| [optional] -**uid** | **str** | UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids | [optional] +**self_link** | **str** | Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. | [optional] +**uid** | **str** | UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/OwnerReference.md b/sdks/python/client/docs/OwnerReference.md index aadfce620221..499372b286b2 100644 --- a/sdks/python/client/docs/OwnerReference.md +++ b/sdks/python/client/docs/OwnerReference.md @@ -7,9 +7,9 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **api_version** | **str** | API version of the referent. | **kind** | **str** | Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | -**name** | **str** | Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names | -**uid** | **str** | UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids | -**block_owner_deletion** | **bool** | If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. | [optional] +**name** | **str** | Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names | +**uid** | **str** | UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids | +**block_owner_deletion** | **bool** | If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. | [optional] **controller** | **bool** | If true, this reference points to the managing controller. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/PersistentVolumeClaimCondition.md b/sdks/python/client/docs/PersistentVolumeClaimCondition.md index d84cc228076f..04430eed4ab8 100644 --- a/sdks/python/client/docs/PersistentVolumeClaimCondition.md +++ b/sdks/python/client/docs/PersistentVolumeClaimCondition.md @@ -1,16 +1,16 @@ # PersistentVolumeClaimCondition -PersistentVolumeClaimCondition contails details about state of pvc +PersistentVolumeClaimCondition contains details about state of pvc ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **status** | **str** | | -**type** | **str** | Possible enum values: - `\"FileSystemResizePending\"` - controller resize is finished and a file system resize is pending on node - `\"Resizing\"` - a user trigger resize of pvc has been started | +**type** | **str** | | **last_probe_time** | **datetime** | Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. | [optional] **last_transition_time** | **datetime** | Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers. | [optional] -**message** | **str** | Human-readable message indicating details about last transition. | [optional] -**reason** | **str** | Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. | [optional] +**message** | **str** | message is the human-readable message indicating details about last transition. 
| [optional] +**reason** | **str** | reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PersistentVolumeClaimSpec.md b/sdks/python/client/docs/PersistentVolumeClaimSpec.md index 0ca7bb137164..9c70950e54f1 100644 --- a/sdks/python/client/docs/PersistentVolumeClaimSpec.md +++ b/sdks/python/client/docs/PersistentVolumeClaimSpec.md @@ -5,14 +5,14 @@ PersistentVolumeClaimSpec describes the common attributes of storage devices and ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**access_modes** | **[str]** | AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] +**access_modes** | **[str]** | accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] **data_source** | [**TypedLocalObjectReference**](TypedLocalObjectReference.md) | | [optional] -**data_source_ref** | [**TypedLocalObjectReference**](TypedLocalObjectReference.md) | | [optional] +**data_source_ref** | [**TypedObjectReference**](TypedObjectReference.md) | | [optional] **resources** | [**ResourceRequirements**](ResourceRequirements.md) | | [optional] **selector** | [**LabelSelector**](LabelSelector.md) | | [optional] -**storage_class_name** | **str** | Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | [optional] +**storage_class_name** | **str** | storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 | [optional] **volume_mode** | **str** | volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. | [optional] -**volume_name** | **str** | VolumeName is the binding reference to the PersistentVolume backing this claim. | [optional] +**volume_name** | **str** | volumeName is the binding reference to the PersistentVolume backing this claim. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PersistentVolumeClaimStatus.md b/sdks/python/client/docs/PersistentVolumeClaimStatus.md index 97d0a474e36d..c227688eb29b 100644 --- a/sdks/python/client/docs/PersistentVolumeClaimStatus.md +++ b/sdks/python/client/docs/PersistentVolumeClaimStatus.md @@ -5,12 +5,12 @@ PersistentVolumeClaimStatus is the current status of a persistent volume claim. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**access_modes** | **[str]** | AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] -**allocated_resources** | **{str: (str,)}** | The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] -**capacity** | **{str: (str,)}** | Represents the actual resources of the underlying volume. 
| [optional] -**conditions** | [**[PersistentVolumeClaimCondition]**](PersistentVolumeClaimCondition.md) | Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. | [optional] -**phase** | **str** | Phase represents the current phase of PersistentVolumeClaim. Possible enum values: - `\"Bound\"` used for PersistentVolumeClaims that are bound - `\"Lost\"` used for PersistentVolumeClaims that lost their underlying PersistentVolume. The claim was bound to a PersistentVolume and this volume does not exist any longer and all data on it was lost. - `\"Pending\"` used for PersistentVolumeClaims that are not yet bound | [optional] -**resize_status** | **str** | ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] +**access_modes** | **[str]** | accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 | [optional] +**allocated_resources** | **{str: (str,)}** | allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. 
| [optional] +**capacity** | **{str: (str,)}** | capacity represents the actual resources of the underlying volume. | [optional] +**conditions** | [**[PersistentVolumeClaimCondition]**](PersistentVolumeClaimCondition.md) | conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. | [optional] +**phase** | **str** | phase represents the current phase of PersistentVolumeClaim. | [optional] +**resize_status** | **str** | resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PersistentVolumeClaimVolumeSource.md b/sdks/python/client/docs/PersistentVolumeClaimVolumeSource.md index 566ced71f884..53a522ed42bc 100644 --- a/sdks/python/client/docs/PersistentVolumeClaimVolumeSource.md +++ b/sdks/python/client/docs/PersistentVolumeClaimVolumeSource.md @@ -5,8 +5,8 @@ PersistentVolumeClaimVolumeSource references the user's PVC in the same namespac ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**claim_name** | **str** | ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims | -**read_only** | **bool** | Will force the ReadOnly setting in VolumeMounts. Default false. 
| [optional] +**claim_name** | **str** | claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims | +**read_only** | **bool** | readOnly Will force the ReadOnly setting in VolumeMounts. Default false. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PhotonPersistentDiskVolumeSource.md b/sdks/python/client/docs/PhotonPersistentDiskVolumeSource.md index 4456bd8e1941..c1791f648ce7 100644 --- a/sdks/python/client/docs/PhotonPersistentDiskVolumeSource.md +++ b/sdks/python/client/docs/PhotonPersistentDiskVolumeSource.md @@ -5,8 +5,8 @@ Represents a Photon Controller persistent disk resource. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**pd_id** | **str** | ID that identifies Photon Controller persistent disk | -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**pd_id** | **str** | pdID is the ID that identifies Photon Controller persistent disk | +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PodAffinityTerm.md b/sdks/python/client/docs/PodAffinityTerm.md index e4aa102f641b..02a4e0a81755 100644 --- a/sdks/python/client/docs/PodAffinityTerm.md +++ b/sdks/python/client/docs/PodAffinityTerm.md @@ -8,7 +8,7 @@ Name | Type | Description | Notes **topology_key** | **str** | This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. | **label_selector** | [**LabelSelector**](LabelSelector.md) | | [optional] **namespace_selector** | [**LabelSelector**](LabelSelector.md) | | [optional] -**namespaces** | **[str]** | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\" | [optional] +**namespaces** | **[str]** | namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\". 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/PodSecurityContext.md b/sdks/python/client/docs/PodSecurityContext.md index e146150529a2..30b74a2167a8 100644 --- a/sdks/python/client/docs/PodSecurityContext.md +++ b/sdks/python/client/docs/PodSecurityContext.md @@ -12,7 +12,7 @@ Name | Type | Description | Notes **run_as_user** | **int** | The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. | [optional] **se_linux_options** | [**SELinuxOptions**](SELinuxOptions.md) | | [optional] **seccomp_profile** | [**SeccompProfile**](SeccompProfile.md) | | [optional] -**supplemental_groups** | **[int]** | A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows. | [optional] +**supplemental_groups** | **[int]** | A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. 
Note that this field cannot be set when spec.os.name is windows. | [optional] **sysctls** | [**[Sysctl]**](Sysctl.md) | Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. | [optional] **windows_options** | [**WindowsSecurityContextOptions**](WindowsSecurityContextOptions.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/PortworxVolumeSource.md b/sdks/python/client/docs/PortworxVolumeSource.md index c5beb6abac1f..3d4c6931f2b1 100644 --- a/sdks/python/client/docs/PortworxVolumeSource.md +++ b/sdks/python/client/docs/PortworxVolumeSource.md @@ -5,9 +5,9 @@ PortworxVolumeSource represents a Portworx volume resource. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**volume_id** | **str** | VolumeID uniquely identifies a Portworx volume | -**fs_type** | **str** | FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**read_only** | **bool** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**volume_id** | **str** | volumeID uniquely identifies a Portworx volume | +**fs_type** | **str** | fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**read_only** | **bool** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ProjectedVolumeSource.md b/sdks/python/client/docs/ProjectedVolumeSource.md index ef201cb06090..885e63456b66 100644 --- a/sdks/python/client/docs/ProjectedVolumeSource.md +++ b/sdks/python/client/docs/ProjectedVolumeSource.md @@ -5,8 +5,8 @@ Represents a projected volume source ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**default_mode** | **int** | Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**sources** | [**[VolumeProjection]**](VolumeProjection.md) | list of volume projections | [optional] +**default_mode** | **int** | defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. 
| [optional] +**sources** | [**[VolumeProjection]**](VolumeProjection.md) | sources is the list of volume projections | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/QuobyteVolumeSource.md b/sdks/python/client/docs/QuobyteVolumeSource.md index f5d3ddc6d667..4f04a49ca1c7 100644 --- a/sdks/python/client/docs/QuobyteVolumeSource.md +++ b/sdks/python/client/docs/QuobyteVolumeSource.md @@ -5,12 +5,12 @@ Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**registry** | **str** | Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes | -**volume** | **str** | Volume is a string that references an already created Quobyte volume by name. | -**group** | **str** | Group to map volume access to Default is no group | [optional] -**read_only** | **bool** | ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
| [optional] -**tenant** | **str** | Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin | [optional] -**user** | **str** | User to map volume access to Defaults to serivceaccount user | [optional] +**registry** | **str** | registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes | +**volume** | **str** | volume is a string that references an already created Quobyte volume by name. | +**group** | **str** | group to map volume access to Default is no group | [optional] +**read_only** | **bool** | readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. | [optional] +**tenant** | **str** | tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin | [optional] +**user** | **str** | user to map volume access to Defaults to serivceaccount user | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/RBDVolumeSource.md b/sdks/python/client/docs/RBDVolumeSource.md index 64324677561b..38149dd5bb7a 100644 --- a/sdks/python/client/docs/RBDVolumeSource.md +++ b/sdks/python/client/docs/RBDVolumeSource.md @@ -5,14 +5,14 @@ Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volu ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**image** | **str** | The rados image name. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | -**monitors** | **[str]** | A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | -**fs_type** | **str** | Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd | [optional] -**keyring** | **str** | Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] -**pool** | **str** | The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] -**read_only** | **bool** | ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**image** | **str** | image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | +**monitors** | **[str]** | monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | +**fs_type** | **str** | fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd | [optional] +**keyring** | **str** | keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**pool** | **str** | pool is the rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**read_only** | **bool** | readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] -**user** | **str** | The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] +**user** | **str** | user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ResourceClaim.md b/sdks/python/client/docs/ResourceClaim.md new file mode 100644 index 000000000000..6bc50fcf05db --- /dev/null +++ b/sdks/python/client/docs/ResourceClaim.md @@ -0,0 +1,13 @@ +# ResourceClaim + +ResourceClaim references one entry in PodSpec.ResourceClaims. + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
| +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/ResourceFieldSelector.md b/sdks/python/client/docs/ResourceFieldSelector.md index 306a7e20ead3..2e6015d0c870 100644 --- a/sdks/python/client/docs/ResourceFieldSelector.md +++ b/sdks/python/client/docs/ResourceFieldSelector.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **resource** | **str** | Required: resource to select | **container_name** | **str** | Container name: required for volumes, optional for env vars | [optional] -**divisor** | **str** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. 
Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. | [optional] +**divisor** | **str** | Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: ``` <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. 
| .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ``` No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: - No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: - 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ResourceRequirements.md b/sdks/python/client/docs/ResourceRequirements.md index 6fcd52166776..4c0399c740db 100644 --- a/sdks/python/client/docs/ResourceRequirements.md +++ b/sdks/python/client/docs/ResourceRequirements.md @@ -5,8 +5,9 @@ ResourceRequirements describes the compute resource requirements. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**claims** | [**[ResourceClaim]**](ResourceClaim.md) | Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. | [optional] **limits** | **{str: (str,)}** | Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | [optional] -**requests** | **{str: (str,)}** | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | [optional] +**requests** | **{str: (str,)}** | Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ScaleIOVolumeSource.md b/sdks/python/client/docs/ScaleIOVolumeSource.md index cacc9eee7bcd..f3b8fe1ce052 100644 --- a/sdks/python/client/docs/ScaleIOVolumeSource.md +++ b/sdks/python/client/docs/ScaleIOVolumeSource.md @@ -5,16 +5,16 @@ ScaleIOVolumeSource represents a persistent ScaleIO volume ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**gateway** | **str** | The host address of the ScaleIO API Gateway. | +**gateway** | **str** | gateway is the host address of the ScaleIO API Gateway. | **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | -**system** | **str** | The name of the storage system as configured in ScaleIO. | -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\". | [optional] -**protection_domain** | **str** | The name of the ScaleIO Protection Domain for the configured storage. | [optional] -**read_only** | **bool** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] -**ssl_enabled** | **bool** | Flag to enable/disable SSL communication with Gateway, default false | [optional] -**storage_mode** | **str** | Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. | [optional] -**storage_pool** | **str** | The ScaleIO Storage Pool associated with the protection domain. 
| [optional] -**volume_name** | **str** | The name of a volume already created in the ScaleIO system that is associated with this volume source. | [optional] +**system** | **str** | system is the name of the storage system as configured in ScaleIO. | +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\". | [optional] +**protection_domain** | **str** | protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. | [optional] +**read_only** | **bool** | readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**ssl_enabled** | **bool** | sslEnabled Flag enable/disable SSL communication with Gateway, default false | [optional] +**storage_mode** | **str** | storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. | [optional] +**storage_pool** | **str** | storagePool is the ScaleIO Storage Pool associated with the protection domain. | [optional] +**volume_name** | **str** | volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/SeccompProfile.md b/sdks/python/client/docs/SeccompProfile.md index 3855ebb15e6e..2a0517b81317 100644 --- a/sdks/python/client/docs/SeccompProfile.md +++ b/sdks/python/client/docs/SeccompProfile.md @@ -5,7 +5,7 @@ SeccompProfile defines a pod/container's seccomp profile settings. 
Only one prof ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**type** | **str** | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. Possible enum values: - `\"Localhost\"` indicates a profile defined in a file on the node should be used. The file's location relative to <kubelet-root-dir>/seccomp. - `\"RuntimeDefault\"` represents the default container runtime seccomp profile. - `\"Unconfined\"` indicates no seccomp profile is applied (A.K.A. unconfined). | +**type** | **str** | type indicates which kind of seccomp profile will be applied. Valid options are: Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. | **localhost_profile** | **str** | localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\". | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/SecretProjection.md b/sdks/python/client/docs/SecretProjection.md index f9a2d1bc3e89..1b6bea62daa3 100644 --- a/sdks/python/client/docs/SecretProjection.md +++ b/sdks/python/client/docs/SecretProjection.md @@ -5,9 +5,9 @@ Adapts a secret into a projected volume. 
The contents of the target Secret's Da ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**items** | [**[KeyToPath]**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**items** | [**[KeyToPath]**](KeyToPath.md) | items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] **name** | **str** | Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | [optional] -**optional** | **bool** | Specify whether the Secret or its key must be defined | [optional] +**optional** | **bool** | optional field specify whether the Secret or its key must be defined | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/SecretVolumeSource.md b/sdks/python/client/docs/SecretVolumeSource.md index ce9194bba8b0..c252cabd1345 100644 --- a/sdks/python/client/docs/SecretVolumeSource.md +++ b/sdks/python/client/docs/SecretVolumeSource.md @@ -5,10 +5,10 @@ Adapts a Secret into a volume. The contents of the target Secret's Data field w ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**default_mode** | **int** | Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] -**items** | [**[KeyToPath]**](KeyToPath.md) | If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] -**optional** | **bool** | Specify whether the Secret or its keys must be defined | [optional] -**secret_name** | **str** | Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | [optional] +**default_mode** | **int** | defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. | [optional] +**items** | [**[KeyToPath]**](KeyToPath.md) | items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. | [optional] +**optional** | **bool** | optional field specify whether the Secret or its keys must be defined | [optional] +**secret_name** | **str** | secretName is the name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/SensorServiceApi.md b/sdks/python/client/docs/SensorServiceApi.md index e24fbcbda071..53f8942825cd 100644 --- a/sdks/python/client/docs/SensorServiceApi.md +++ b/sdks/python/client/docs/SensorServiceApi.md @@ -66,7 +66,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -174,7 +173,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -183,7 +182,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -199,7 +198,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -208,7 +207,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -421,7 +420,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", 
- image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -439,7 +438,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -462,7 +461,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -491,7 +490,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -510,7 +509,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -534,7 +533,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -546,7 +545,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -578,7 +588,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -608,7 +618,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -623,7 +633,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -675,7 +685,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -696,9 +706,9 @@ with argo_workflows.ApiClient(configuration) as api_client: service_account_name="service_account_name_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -795,7 +805,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -843,12 +852,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -2263,6 +2278,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # 
str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -2274,7 +2290,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_sensors(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_sensors(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling SensorServiceApi->list_sensors: %s\n" % e) @@ 
-2295,6 +2311,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -2478,7 +2495,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -2586,7 +2602,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -2595,7 +2611,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -2611,7 +2627,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -2620,7 +2636,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -2833,7 +2849,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -2851,7 +2867,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -2874,7 +2890,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -2903,7 +2919,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -2922,7 +2938,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -2946,7 +2962,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -2958,7 +2974,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -2990,7 +3017,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -3020,7 +3047,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, 
period_seconds=1, @@ -3035,7 +3062,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -3087,7 +3114,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -3108,9 +3135,9 @@ with argo_workflows.ApiClient(configuration) as api_client: service_account_name="service_account_name_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -3207,7 +3234,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -3255,12 +3281,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4487,6 +4519,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. 
(optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -4498,7 +4531,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.watch_sensors(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.watch_sensors(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling SensorServiceApi->watch_sensors: %s\n" % e) @@ 
-4519,6 +4552,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type diff --git a/sdks/python/client/docs/ServiceAccountTokenProjection.md b/sdks/python/client/docs/ServiceAccountTokenProjection.md index 8a5f503094f8..f683c3c84df4 100644 --- a/sdks/python/client/docs/ServiceAccountTokenProjection.md +++ b/sdks/python/client/docs/ServiceAccountTokenProjection.md @@ -5,9 +5,9 @@ ServiceAccountTokenProjection represents a projected service account token volum ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**path** | **str** | Path is the path relative to the mount point of the file to project the token into. | -**audience** | **str** | Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | [optional] -**expiration_seconds** | **int** | ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. | [optional] +**path** | **str** | path is the path relative to the mount point of the file to project the token into. | +**audience** | **str** | audience is the intended audience of the token. 
A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. | [optional] +**expiration_seconds** | **int** | expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/ServicePort.md b/sdks/python/client/docs/ServicePort.md index 77b79f348915..7d638168880e 100644 --- a/sdks/python/client/docs/ServicePort.md +++ b/sdks/python/client/docs/ServicePort.md @@ -6,10 +6,10 @@ ServicePort contains information on service's port. Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **port** | **int** | The port that will be exposed by this service. | -**app_protocol** | **str** | The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. | [optional] +**app_protocol** | **str** | The application protocol for this port. This field follows standard Kubernetes label syntax. 
Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. | [optional] **name** | **str** | The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service. | [optional] **node_port** | **int** | The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport | [optional] -**protocol** | **str** | The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. Possible enum values: - `\"SCTP\"` is the SCTP protocol. - `\"TCP\"` is the TCP protocol. - `\"UDP\"` is the UDP protocol. | [optional] +**protocol** | **str** | The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP. 
| [optional] **target_port** | **str** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/StorageOSVolumeSource.md b/sdks/python/client/docs/StorageOSVolumeSource.md index 25fb14c26ee3..79543efb6618 100644 --- a/sdks/python/client/docs/StorageOSVolumeSource.md +++ b/sdks/python/client/docs/StorageOSVolumeSource.md @@ -5,11 +5,11 @@ Represents a StorageOS persistent volume resource. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**fs_type** | **str** | Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**read_only** | **bool** | Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] +**fs_type** | **str** | fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**read_only** | **bool** | readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. | [optional] **secret_ref** | [**LocalObjectReference**](LocalObjectReference.md) | | [optional] -**volume_name** | **str** | VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. | [optional] -**volume_namespace** | **str** | VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. 
Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. | [optional] +**volume_name** | **str** | volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. | [optional] +**volume_namespace** | **str** | volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/Toleration.md b/sdks/python/client/docs/Toleration.md index 2bf08a389930..bbb52ed553ae 100644 --- a/sdks/python/client/docs/Toleration.md +++ b/sdks/python/client/docs/Toleration.md @@ -5,9 +5,9 @@ The pod this Toleration is attached to tolerates any taint that matches the trip ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**effect** | **str** | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. Possible enum values: - `\"NoExecute\"` Evict any already-running pods that do not tolerate the taint. Currently enforced by NodeController. 
- `\"NoSchedule\"` Do not allow new pods to schedule onto the node unless they tolerate the taint, but allow all pods submitted to Kubelet without going through the scheduler to start, and allow all already-running pods to continue running. Enforced by the scheduler. - `\"PreferNoSchedule\"` Like TaintEffectNoSchedule, but the scheduler tries not to schedule new pods onto the node, rather than prohibiting new pods from scheduling onto the node entirely. Enforced by the scheduler. | [optional] +**effect** | **str** | Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. | [optional] **key** | **str** | Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. | [optional] -**operator** | **str** | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Possible enum values: - `\"Equal\"` - `\"Exists\"` | [optional] +**operator** | **str** | Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. | [optional] **toleration_seconds** | **int** | TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. | [optional] **value** | **str** | Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] diff --git a/sdks/python/client/docs/TypedObjectReference.md b/sdks/python/client/docs/TypedObjectReference.md new file mode 100644 index 000000000000..c6c53950ad30 --- /dev/null +++ b/sdks/python/client/docs/TypedObjectReference.md @@ -0,0 +1,15 @@ +# TypedObjectReference + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kind** | **str** | Kind is the type of resource being referenced | +**name** | **str** | Name is the name of resource being referenced | +**api_group** | **str** | APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. | [optional] +**namespace** | **str** | Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
| [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/python/client/docs/Volume.md b/sdks/python/client/docs/Volume.md index 476da1a7629b..e2e0e30b6cbb 100644 --- a/sdks/python/client/docs/Volume.md +++ b/sdks/python/client/docs/Volume.md @@ -5,7 +5,7 @@ Volume represents a named volume in a pod that may be accessed by any container ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**name** | **str** | Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | +**name** | **str** | name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names | **aws_elastic_block_store** | [**AWSElasticBlockStoreVolumeSource**](AWSElasticBlockStoreVolumeSource.md) | | [optional] **azure_disk** | [**AzureDiskVolumeSource**](AzureDiskVolumeSource.md) | | [optional] **azure_file** | [**AzureFileVolumeSource**](AzureFileVolumeSource.md) | | [optional] diff --git a/sdks/python/client/docs/VsphereVirtualDiskVolumeSource.md b/sdks/python/client/docs/VsphereVirtualDiskVolumeSource.md index 90e3495d723b..29f4523e3f42 100644 --- a/sdks/python/client/docs/VsphereVirtualDiskVolumeSource.md +++ b/sdks/python/client/docs/VsphereVirtualDiskVolumeSource.md @@ -5,10 +5,10 @@ Represents a vSphere volume resource. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**volume_path** | **str** | Path that identifies vSphere volume vmdk | -**fs_type** | **str** | Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] -**storage_policy_id** | **str** | Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. | [optional] -**storage_policy_name** | **str** | Storage Policy Based Management (SPBM) profile name. | [optional] +**volume_path** | **str** | volumePath is the path that identifies vSphere volume vmdk | +**fs_type** | **str** | fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. | [optional] +**storage_policy_id** | **str** | storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. | [optional] +**storage_policy_name** | **str** | storagePolicyName is the storage Policy Based Management (SPBM) profile name. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/python/client/docs/WorkflowServiceApi.md b/sdks/python/client/docs/WorkflowServiceApi.md index 4296e4211333..b3e910ca39b5 100644 --- a/sdks/python/client/docs/WorkflowServiceApi.md +++ b/sdks/python/client/docs/WorkflowServiceApi.md @@ -80,7 +80,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -129,7 +128,7 @@ with argo_workflows.ApiClient(configuration) as api_client: 
match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -138,7 +137,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -154,7 +153,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -163,7 +162,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -987,6 +986,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -1048,7 +1048,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -1093,7 +1093,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1102,7 +1102,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1118,7 +1118,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", 
values=[ "values_example", ], @@ -1127,7 +1127,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1553,7 +1553,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -1571,7 +1571,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1594,7 +1594,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1623,7 +1623,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1642,7 +1642,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1666,7 +1666,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1678,7 +1678,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ 
"key": "key_example", }, @@ -1710,7 +1721,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -1740,7 +1751,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1755,7 +1766,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -1846,7 +1857,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1869,7 +1880,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1898,7 +1909,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1917,7 +1928,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1941,7 +1952,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1953,7 
+1964,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1985,7 +2007,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -2015,7 +2037,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3011,7 +3033,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3034,7 +3056,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3063,7 +3085,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3083,7 +3105,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -3107,7 +3129,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -3119,7 +3141,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -3151,7 +3184,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -3181,7 +3214,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4154,7 +4187,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4177,7 +4210,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4206,7 +4239,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4225,7 +4258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4249,7 +4282,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - 
scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4261,7 +4294,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4293,7 +4337,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4324,7 +4368,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4373,7 +4417,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -4459,7 +4503,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4482,7 +4526,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4511,7 +4555,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4531,7 +4575,7 @@ with argo_workflows.ApiClient(configuration) as api_client: 
host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4555,7 +4599,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4567,7 +4611,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4599,7 +4654,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4629,7 +4684,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -5270,9 +5325,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -5369,7 +5424,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -5417,12 +5471,18 @@ with argo_workflows.ApiClient(configuration) as api_client: 
kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -5660,7 +5720,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5669,7 +5729,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5685,7 +5745,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5694,7 +5754,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -6120,7 +6180,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -6138,7 +6198,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6161,7 +6221,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ 
-6190,7 +6250,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6209,7 +6269,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6233,7 +6293,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6245,7 +6305,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6277,7 +6348,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6307,7 +6378,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6322,7 +6393,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -6413,7 +6484,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6436,7 +6507,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6465,7 +6536,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6484,7 +6555,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6508,7 +6579,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6520,7 +6591,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6552,7 +6634,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6582,7 +6664,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7578,7 +7660,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7601,7 +7683,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7630,7 +7712,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7650,7 +7732,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -7674,7 +7756,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7686,7 +7768,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -7718,7 +7811,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -7748,7 +7841,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, 
period_seconds=1, @@ -8721,7 +8814,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8744,7 +8837,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8773,7 +8866,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8792,7 +8885,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -8816,7 +8909,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8828,7 +8921,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -8860,7 +8964,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -8891,7 +8995,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8940,7 +9044,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -9026,7 +9130,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9049,7 +9153,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9078,7 +9182,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9098,7 +9202,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -9122,7 +9226,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9134,7 +9238,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -9166,7 +9281,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + 
type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -9196,7 +9311,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9837,9 +9952,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -9936,7 +10051,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -9984,12 +10098,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10219,9 +10339,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -10242,7 +10362,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, 
deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10290,12 +10409,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10338,10 +10463,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -10438,7 +10563,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10486,12 +10610,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -11844,7 +11974,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -11892,12 +12021,18 @@ with argo_workflows.ApiClient(configuration) as 
api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12140,7 +12275,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12149,7 +12284,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12165,7 +12300,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12174,7 +12309,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12600,7 +12735,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -12618,7 +12753,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12641,7 +12776,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
tcp_socket=TCPSocketAction( host="host_example", @@ -12670,7 +12805,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12689,7 +12824,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12713,7 +12848,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12725,7 +12860,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12757,7 +12903,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -12787,7 +12933,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12802,7 +12948,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -12893,7 
+13039,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12916,7 +13062,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12945,7 +13091,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12964,7 +13110,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12988,7 +13134,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -13000,7 +13146,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -13032,7 +13189,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -13062,7 +13219,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -14058,7 +14215,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -14081,7 +14238,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -14110,7 +14267,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14130,7 +14287,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -14154,7 +14311,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14166,7 +14323,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -14198,7 +14366,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -14228,7 +14396,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15201,7 +15369,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15224,7 +15392,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15253,7 +15421,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15272,7 +15440,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15296,7 +15464,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15308,7 +15476,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15340,7 +15519,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15371,7 +15550,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15420,7 +15599,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -15506,7 +15685,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15529,7 +15708,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15558,7 +15737,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15578,7 +15757,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15602,7 +15781,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15614,7 +15793,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15646,7 +15836,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15676,7 +15866,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -16317,9 +16507,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -16416,7 +16606,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -16464,12 +16653,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -16707,7 +16902,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16716,7 +16911,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + 
operator="operator_example", values=[ "values_example", ], @@ -16732,7 +16927,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16741,7 +16936,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17565,6 +17760,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -17626,7 +17822,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -17671,7 +17867,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17680,7 +17876,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17696,7 +17892,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17705,7 +17901,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -18131,7 +18327,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -18149,7 +18345,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18172,7 +18368,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18201,7 +18397,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18220,7 +18416,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -18244,7 +18440,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18256,7 +18452,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -18288,7 +18495,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), 
windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -18318,7 +18525,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18333,7 +18540,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -18424,7 +18631,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18447,7 +18654,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18476,7 +18683,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18495,7 +18702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -18519,7 +18726,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18531,7 +18738,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + 
restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -18563,7 +18781,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -18593,7 +18811,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19589,7 +19807,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19612,7 +19830,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19641,7 +19859,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19661,7 +19879,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -19685,7 +19903,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19697,7 +19915,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + 
ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -19729,7 +19958,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -19759,7 +19988,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20732,7 +20961,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -20755,7 +20984,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -20784,7 +21013,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20803,7 +21032,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -20827,7 +21056,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20839,7 +21068,18 @@ with argo_workflows.ApiClient(configuration) as api_client: 
termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -20871,7 +21111,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -20902,7 +21142,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20951,7 +21191,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -21037,7 +21277,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -21060,7 +21300,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -21089,7 +21329,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -21109,7 +21349,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -21133,7 
+21373,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -21145,7 +21385,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21177,7 +21428,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -21207,7 +21458,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -21848,9 +22099,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -21947,7 +22198,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21995,12 +22245,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( 
api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -22238,7 +22494,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22247,7 +22503,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22263,7 +22519,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22272,7 +22528,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22698,7 +22954,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -22716,7 +22972,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -22739,7 +22995,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -22768,7 +23024,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -22787,7 +23043,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -22811,7 +23067,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -22823,7 +23079,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -22855,7 +23122,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -22885,7 +23152,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -22900,7 +23167,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -22991,7 +23258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23014,7 +23281,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23043,7 +23310,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23062,7 +23329,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -23086,7 +23353,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23098,7 +23365,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23130,7 +23408,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -23160,7 +23438,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24156,7 +24434,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24179,7 +24457,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24208,7 +24486,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24228,7 +24506,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -24252,7 +24530,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24264,7 +24542,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -24296,7 +24585,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -24326,7 +24615,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25299,7 +25588,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25322,7 +25611,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25351,7 +25640,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25370,7 +25659,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -25394,7 +25683,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25406,7 +25695,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -25438,7 +25738,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -25469,7 +25769,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -25518,7 +25818,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -25604,7 +25904,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25627,7 +25927,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25656,7 +25956,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25676,7 +25976,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -25700,7 +26000,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25712,7 +26012,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -25744,7 +26055,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + 
type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -25774,7 +26085,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26415,9 +26726,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -26514,7 +26825,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -26562,12 +26872,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26797,9 +27113,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -26820,7 +27136,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, 
deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -26868,12 +27183,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26916,10 +27237,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -27016,7 +27337,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -27064,12 +27384,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -27634,7 +27960,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -27683,7 +28008,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27692,7 +28017,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27708,7 +28033,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27717,7 +28042,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -28541,6 +28866,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -28602,7 +28928,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -28647,7 +28973,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -28656,7 +28982,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -28672,7 +28998,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - 
operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -28681,7 +29007,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -29107,7 +29433,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -29125,7 +29451,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29148,7 +29474,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29177,7 +29503,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29196,7 +29522,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -29220,7 +29546,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29232,7 +29558,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], 
resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -29264,7 +29601,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -29294,7 +29631,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29309,7 +29646,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -29400,7 +29737,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29423,7 +29760,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29452,7 +29789,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29471,7 +29808,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -29495,7 +29832,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29507,7 +29844,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -29539,7 +29887,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -29569,7 +29917,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30565,7 +30913,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30588,7 +30936,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30617,7 +30965,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30637,7 +30985,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30661,7 +31009,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30673,7 +31021,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -30705,7 +31064,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -30735,7 +31094,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31708,7 +32067,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -31731,7 +32090,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -31760,7 +32119,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31779,7 +32138,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], 
readiness_probe=Probe( @@ -31803,7 +32162,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31815,7 +32174,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -31847,7 +32217,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -31878,7 +32248,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31927,7 +32297,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -32013,7 +32383,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -32036,7 +32406,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -32065,7 +32435,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - 
scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -32085,7 +32455,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -32109,7 +32479,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -32121,7 +32491,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -32153,7 +32534,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -32183,7 +32564,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -32824,9 +33205,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -32923,7 +33304,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), 
deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32971,12 +33351,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -33214,7 +33600,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -33223,7 +33609,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -33239,7 +33625,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -33248,7 +33634,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -33674,7 +34060,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -33692,7 +34078,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -33715,7 +34101,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -33744,7 +34130,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -33763,7 +34149,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -33787,7 +34173,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -33799,7 +34185,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -33831,7 +34228,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -33861,7 +34258,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -33876,7 +34273,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - 
termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -33967,7 +34364,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -33990,7 +34387,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -34019,7 +34416,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -34038,7 +34435,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -34062,7 +34459,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -34074,7 +34471,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -34106,7 +34514,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ 
-34136,7 +34544,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -35132,7 +35540,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -35155,7 +35563,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -35184,7 +35592,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -35204,7 +35612,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -35228,7 +35636,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -35240,7 +35648,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -35272,7 +35691,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( 
gmsa_credential_spec="gmsa_credential_spec_example", @@ -35302,7 +35721,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36275,7 +36694,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -36298,7 +36717,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -36327,7 +36746,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36346,7 +36765,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -36370,7 +36789,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36382,7 +36801,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -36414,7 +36844,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", 
), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -36445,7 +36875,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36494,7 +36924,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -36580,7 +37010,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -36603,7 +37033,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -36632,7 +37062,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36652,7 +37082,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -36676,7 +37106,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -36688,7 +37118,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + 
ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -36720,7 +37161,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -36750,7 +37191,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -37391,9 +37832,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -37490,7 +37931,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -37538,12 +37978,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -37773,9 +38219,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -37796,7 +38242,6 @@ with 
argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -37844,12 +38289,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -37892,10 +38343,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -37992,7 +38443,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -38040,12 +38490,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -39398,7 +39854,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", 
creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -39446,12 +39901,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -39694,7 +40155,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -39703,7 +40164,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -39719,7 +40180,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -39728,7 +40189,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -40154,7 +40615,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -40172,7 +40633,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
tcp_socket=TCPSocketAction( host="host_example", @@ -40195,7 +40656,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -40224,7 +40685,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -40243,7 +40704,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -40267,7 +40728,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -40279,7 +40740,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -40311,7 +40783,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -40341,7 +40813,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -40356,7 +40828,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, 
termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -40447,7 +40919,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -40470,7 +40942,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -40499,7 +40971,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -40518,7 +40990,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -40542,7 +41014,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -40554,7 +41026,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -40586,7 +41069,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), 
windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -40616,7 +41099,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -41612,7 +42095,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -41635,7 +42118,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -41664,7 +42147,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -41684,7 +42167,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -41708,7 +42191,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -41720,7 +42203,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -41752,7 +42246,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( 
localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -41782,7 +42276,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -42755,7 +43249,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -42778,7 +43272,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -42807,7 +43301,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -42826,7 +43320,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -42850,7 +43344,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -42862,7 +43356,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -42894,7 +43399,7 @@ with argo_workflows.ApiClient(configuration) 
as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -42925,7 +43430,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -42974,7 +43479,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -43060,7 +43565,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -43083,7 +43588,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -43112,7 +43617,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -43132,7 +43637,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -43156,7 +43661,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -43168,7 +43673,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + 
resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -43200,7 +43716,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -43230,7 +43746,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -43871,9 +44387,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -43970,7 +44486,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -44018,12 +44533,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -44261,7 +44782,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - 
operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -44270,7 +44791,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -44286,7 +44807,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -44295,7 +44816,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -45119,6 +45640,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -45180,7 +45702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -45225,7 +45747,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -45234,7 +45756,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -45250,7 +45772,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -45259,7 
+45781,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -45685,7 +46207,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -45703,7 +46225,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -45726,7 +46248,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -45755,7 +46277,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -45774,7 +46296,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -45798,7 +46320,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -45810,7 +46332,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, 
@@ -45842,7 +46375,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -45872,7 +46405,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -45887,7 +46420,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -45978,7 +46511,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -46001,7 +46534,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -46030,7 +46563,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -46049,7 +46582,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -46073,7 +46606,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -46085,7 
+46618,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -46117,7 +46661,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -46147,7 +46691,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -47143,7 +47687,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -47166,7 +47710,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -47195,7 +47739,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -47215,7 +47759,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -47239,7 +47783,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -47251,7 +47795,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -47283,7 +47838,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -47313,7 +47868,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48286,7 +48841,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -48309,7 +48864,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -48338,7 +48893,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48357,7 +48912,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -48381,7 +48936,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48393,7 +48948,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -48425,7 +48991,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -48456,7 +49022,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48505,7 +49071,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -48591,7 +49157,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -48614,7 +49180,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -48643,7 +49209,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48663,7 +49229,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -48687,7 +49253,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -48699,7 +49265,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -48731,7 +49308,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -48761,7 +49338,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -49402,9 +49979,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -49501,7 +50078,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -49549,12 +50125,18 
@@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -49792,7 +50374,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -49801,7 +50383,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -49817,7 +50399,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -49826,7 +50408,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -50252,7 +50834,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -50270,7 +50852,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -50293,7 +50875,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -50322,7 +50904,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -50341,7 +50923,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -50365,7 +50947,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -50377,7 +50959,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -50409,7 +51002,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -50439,7 +51032,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -50454,7 +51047,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ 
VolumeDevice( @@ -50545,7 +51138,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -50568,7 +51161,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -50597,7 +51190,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -50616,7 +51209,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -50640,7 +51233,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -50652,7 +51245,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -50684,7 +51288,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -50714,7 +51318,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + 
scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -51710,7 +52314,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -51733,7 +52337,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -51762,7 +52366,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -51782,7 +52386,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -51806,7 +52410,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -51818,7 +52422,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -51850,7 +52465,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -51880,7 +52495,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -52853,7 +53468,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -52876,7 +53491,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -52905,7 +53520,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -52924,7 +53539,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -52948,7 +53563,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -52960,7 +53575,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -52992,7 +53618,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -53023,7 +53649,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -53072,7 +53698,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -53158,7 +53784,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -53181,7 +53807,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -53210,7 +53836,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -53230,7 +53856,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -53254,7 +53880,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -53266,7 +53892,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -53298,7 +53935,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -53328,7 +53965,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -53969,9 +54606,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -54068,7 +54705,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -54116,12 +54752,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -54351,9 +54993,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -54374,7 +55016,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - 
cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -54422,12 +55063,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -54470,10 +55117,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -54570,7 +55217,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -54618,12 +55264,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -54994,6 +55646,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) fields = "fields_example" # str | Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\". (optional) # example passing only required values which don't have defaults set @@ -55006,7 +55659,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, fields=fields) + api_response = api_instance.list_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, 
list_options_send_initial_events=list_options_send_initial_events, fields=fields) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling WorkflowServiceApi->list_workflows: %s\n" % e) @@ -55027,6 +55680,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] **fields** | **str**| Fields to be included or excluded in the response. e.g. \"items.spec,items.status.phase\", \"-items.status.nodes\". | [optional] ### Return type @@ -55932,6 +56586,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. 
(optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. 
+optional (optional) # example passing only required values which don't have defaults set try: @@ -55943,7 +56598,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.watch_events(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.watch_events(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling WorkflowServiceApi->watch_events: %s\n" % e) @@ -55964,6 +56619,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. 
Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -56035,6 +56691,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. (optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
(optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. 
When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) fields = "fields_example" # str | (optional) # example passing only required values which don't have defaults set @@ -56047,7 +56704,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.watch_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, fields=fields) + api_response = api_instance.watch_workflows(namespace, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, 
list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events, fields=fields) pprint(api_response) except argo_workflows.ApiException as e: print("Exception when calling WorkflowServiceApi->watch_workflows: %s\n" % e) @@ -56068,6 +56725,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. 
Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] **fields** | **str**| | [optional] ### Return type diff --git a/sdks/python/client/docs/WorkflowTemplateServiceApi.md b/sdks/python/client/docs/WorkflowTemplateServiceApi.md index 8f9032d97d57..60537c860943 100644 --- a/sdks/python/client/docs/WorkflowTemplateServiceApi.md +++ b/sdks/python/client/docs/WorkflowTemplateServiceApi.md @@ -67,7 +67,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -116,7 +115,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -125,7 +124,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - 
operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -141,7 +140,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -150,7 +149,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -974,6 +973,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -1035,7 +1035,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -1080,7 +1080,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1089,7 +1089,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1105,7 +1105,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1114,7 +1114,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -1540,7 +1540,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -1558,7 +1558,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1581,7 +1581,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1610,7 +1610,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1629,7 +1629,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1653,7 +1653,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1665,7 +1665,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1697,7 +1708,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( 
gmsa_credential_spec="gmsa_credential_spec_example", @@ -1727,7 +1738,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1742,7 +1753,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -1833,7 +1844,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1856,7 +1867,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -1885,7 +1896,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1904,7 +1915,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -1928,7 +1939,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -1940,7 +1951,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], 
resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -1972,7 +1994,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -2002,7 +2024,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -2998,7 +3020,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3021,7 +3043,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -3050,7 +3072,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3070,7 +3092,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -3094,7 +3116,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -3106,7 +3128,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + 
restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -3138,7 +3171,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -3168,7 +3201,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4141,7 +4174,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4164,7 +4197,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4193,7 +4226,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4212,7 +4245,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4236,7 +4269,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4248,7 +4281,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + 
resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4280,7 +4324,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4311,7 +4355,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4360,7 +4404,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -4446,7 +4490,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4469,7 +4513,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -4498,7 +4542,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4518,7 +4562,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -4542,7 +4586,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - 
scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -4554,7 +4598,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -4586,7 +4641,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -4616,7 +4671,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -5257,9 +5312,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -5356,7 +5411,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -5404,12 +5458,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + 
claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -5647,7 +5707,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5656,7 +5716,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5672,7 +5732,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -5681,7 +5741,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -6107,7 +6167,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -6125,7 +6185,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6148,7 +6208,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6177,7 +6237,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6196,7 +6256,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6220,7 +6280,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6232,7 +6292,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6264,7 +6335,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6294,7 +6365,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6309,7 +6380,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -6400,7 +6471,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6423,7 +6494,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -6452,7 +6523,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6471,7 +6542,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -6495,7 +6566,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -6507,7 +6578,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -6539,7 +6621,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -6569,7 +6651,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7565,7 +7647,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7588,7 +7670,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -7617,7 +7699,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7637,7 +7719,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -7661,7 +7743,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -7673,7 +7755,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -7705,7 +7798,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -7735,7 +7828,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8708,7 +8801,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( 
host="host_example", @@ -8731,7 +8824,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -8760,7 +8853,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8779,7 +8872,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -8803,7 +8896,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8815,7 +8908,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -8847,7 +8951,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -8878,7 +8982,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -8927,7 +9031,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - 
type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -9013,7 +9117,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9036,7 +9140,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -9065,7 +9169,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9085,7 +9189,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -9109,7 +9213,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9121,7 +9225,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -9153,7 +9268,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -9183,7 +9298,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -9824,9 +9939,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -9923,7 +10038,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -9971,12 +10085,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10206,9 +10326,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -10229,7 +10349,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10277,12 +10396,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + 
data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -10325,10 +10450,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -10425,7 +10550,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -10473,12 +10597,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -11008,7 +11138,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -11057,7 +11186,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11066,7 +11195,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11082,7 +11211,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11091,7 +11220,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -11915,6 +12044,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -11976,7 +12106,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -12021,7 +12151,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12030,7 +12160,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12046,7 +12176,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -12055,7 +12185,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + 
operator="operator_example", values=[ "values_example", ], @@ -12481,7 +12611,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -12499,7 +12629,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12522,7 +12652,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12551,7 +12681,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12570,7 +12700,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12594,7 +12724,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12606,7 +12736,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12638,7 +12779,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( 
localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -12668,7 +12809,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12683,7 +12824,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -12774,7 +12915,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12797,7 +12938,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -12826,7 +12967,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12845,7 +12986,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -12869,7 +13010,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -12881,7 +13022,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, 
), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -12913,7 +13065,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -12943,7 +13095,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -13939,7 +14091,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -13962,7 +14114,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -13991,7 +14143,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14011,7 +14163,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -14035,7 +14187,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -14047,7 +14199,18 @@ with argo_workflows.ApiClient(configuration) as api_client: 
termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -14079,7 +14242,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -14109,7 +14272,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15082,7 +15245,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15105,7 +15268,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15134,7 +15297,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15153,7 +15316,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15177,7 +15340,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15189,7 +15352,18 @@ with 
argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15221,7 +15395,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15252,7 +15426,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15301,7 +15475,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -15387,7 +15561,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15410,7 +15584,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -15439,7 +15613,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15459,7 +15633,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + 
protocol="protocol_example", ), ], readiness_probe=Probe( @@ -15483,7 +15657,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -15495,7 +15669,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -15527,7 +15712,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -15557,7 +15742,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -16198,9 +16383,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -16297,7 +16482,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -16345,12 +16529,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - 
data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -16588,7 +16778,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16597,7 +16787,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16613,7 +16803,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -16622,7 +16812,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -17048,7 +17238,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -17066,7 +17256,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17089,7 +17279,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17118,7 +17308,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17137,7 +17327,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17161,7 +17351,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17173,7 +17363,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17205,7 +17406,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17235,7 +17436,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17250,7 +17451,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -17341,7 +17542,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], 
path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17364,7 +17565,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -17393,7 +17594,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17412,7 +17613,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -17436,7 +17637,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -17448,7 +17649,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -17480,7 +17692,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -17510,7 +17722,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18506,7 +18718,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18529,7 +18741,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -18558,7 +18770,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18578,7 +18790,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -18602,7 +18814,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -18614,7 +18826,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -18646,7 +18869,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -18676,7 +18899,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -19649,7 +19872,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19672,7 +19895,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19701,7 +19924,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19720,7 +19943,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -19744,7 +19967,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19756,7 +19979,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -19788,7 +20022,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -19819,7 +20053,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -19868,7 +20102,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -19954,7 +20188,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -19977,7 +20211,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -20006,7 +20240,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20026,7 +20260,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -20050,7 +20284,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20062,7 +20296,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -20094,7 +20339,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( 
localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -20124,7 +20369,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -20765,9 +21010,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -20864,7 +21109,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -20912,12 +21156,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21147,9 +21397,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -21170,7 +21420,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), 
deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21218,12 +21467,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21266,10 +21521,10 @@ with argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -21366,7 +21621,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21414,12 +21668,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -21753,6 +22013,7 @@ with argo_workflows.ApiClient(configuration) as api_client: list_options_timeout_seconds = "listOptions.timeoutSeconds_example" # str | Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. 
(optional) list_options_limit = "listOptions.limit_example" # str | limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. (optional) list_options_continue = "listOptions.continue_example" # str | The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. (optional) + list_options_send_initial_events = True # bool | `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional (optional) # example passing only required values which don't have defaults set try: @@ -21764,7 +22025,7 @@ with argo_workflows.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_workflow_templates(namespace, name_pattern=name_pattern, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue) + api_response = api_instance.list_workflow_templates(namespace, name_pattern=name_pattern, list_options_label_selector=list_options_label_selector, list_options_field_selector=list_options_field_selector, list_options_watch=list_options_watch, list_options_allow_watch_bookmarks=list_options_allow_watch_bookmarks, list_options_resource_version=list_options_resource_version, list_options_resource_version_match=list_options_resource_version_match, list_options_timeout_seconds=list_options_timeout_seconds, list_options_limit=list_options_limit, list_options_continue=list_options_continue, list_options_send_initial_events=list_options_send_initial_events) pprint(api_response) except argo_workflows.ApiException as e: 
print("Exception when calling WorkflowTemplateServiceApi->list_workflow_templates: %s\n" % e) @@ -21786,6 +22047,7 @@ Name | Type | Description | Notes **list_options_timeout_seconds** | **str**| Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. +optional. | [optional] **list_options_limit** | **str**| limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. | [optional] **list_options_continue** | **str**| The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. | [optional] + **list_options_send_initial_events** | **bool**| `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"io.k8s.initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. +optional | [optional] ### Return type @@ -21860,7 +22122,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -21909,7 +22170,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21918,7 +22179,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21934,7 +22195,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -21943,7 +22204,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ 
"values_example", ], @@ -22767,6 +23028,7 @@ with argo_workflows.ApiClient(configuration) as api_client: "key": "key_example", }, ), + unhealthy_pod_eviction_policy="unhealthy_pod_eviction_policy_example", ), pod_gc=IoArgoprojWorkflowV1alpha1PodGC( delete_delay_duration=Duration( @@ -22828,7 +23090,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -22873,7 +23135,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22882,7 +23144,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22898,7 +23160,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -22907,7 +23169,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -23333,7 +23595,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -23351,7 +23613,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23374,7 +23636,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23403,7 +23665,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23422,7 +23684,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -23446,7 +23708,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23458,7 +23720,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23490,7 +23763,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -23520,7 +23793,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23535,7 +23808,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - 
termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -23626,7 +23899,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23649,7 +23922,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -23678,7 +23951,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23697,7 +23970,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -23721,7 +23994,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -23733,7 +24006,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -23765,7 +24049,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ 
-23795,7 +24079,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24791,7 +25075,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24814,7 +25098,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -24843,7 +25127,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24863,7 +25147,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -24887,7 +25171,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -24899,7 +25183,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -24931,7 +25226,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( 
gmsa_credential_spec="gmsa_credential_spec_example", @@ -24961,7 +25256,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -25934,7 +26229,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25957,7 +26252,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -25986,7 +26281,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26005,7 +26300,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26029,7 +26324,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26041,7 +26336,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26073,7 +26379,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", 
), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26104,7 +26410,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26153,7 +26459,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -26239,7 +26545,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26262,7 +26568,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -26291,7 +26597,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26311,7 +26617,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -26335,7 +26641,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -26347,7 +26653,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + 
ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -26379,7 +26696,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -26409,7 +26726,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -27050,9 +27367,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -27149,7 +27466,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -27197,12 +27513,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -27440,7 +27762,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27449,7 +27771,7 @@ with argo_workflows.ApiClient(configuration) as 
api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27465,7 +27787,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_expressions=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27474,7 +27796,7 @@ with argo_workflows.ApiClient(configuration) as api_client: match_fields=[ NodeSelectorRequirement( key="key_example", - operator="DoesNotExist", + operator="operator_example", values=[ "values_example", ], @@ -27900,7 +28222,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), ], image="image_example", - image_pull_policy="Always", + image_pull_policy="image_pull_policy_example", lifecycle=Lifecycle( post_start=LifecycleHandler( _exec=ExecAction( @@ -27918,7 +28240,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -27941,7 +28263,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -27970,7 +28292,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -27989,7 +28311,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -28013,7 +28335,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, 
@@ -28025,7 +28347,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28057,7 +28390,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28087,7 +28420,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28102,7 +28435,7 @@ with argo_workflows.ApiClient(configuration) as api_client: stdin=True, stdin_once=True, termination_message_path="termination_message_path_example", - termination_message_policy="FallbackToLogsOnError", + termination_message_policy="termination_message_policy_example", tty=True, volume_devices=[ VolumeDevice( @@ -28193,7 +28526,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28216,7 +28549,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -28245,7 +28578,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28264,7 +28597,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -28288,7 +28621,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -28300,7 +28633,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -28332,7 +28676,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -28362,7 +28706,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29358,7 +29702,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29381,7 +29725,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -29410,7 +29754,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), 
initial_delay_seconds=1, period_seconds=1, @@ -29430,7 +29774,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -29454,7 +29798,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -29466,7 +29810,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -29498,7 +29853,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -29528,7 +29883,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30501,7 +30856,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30524,7 +30879,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30553,7 +30908,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", 
port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30572,7 +30927,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30596,7 +30951,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30608,7 +30963,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -30640,7 +31006,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -30671,7 +31037,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30720,7 +31086,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), supplemental_groups=[ 1, @@ -30806,7 +31172,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30829,7 +31195,7 @@ with 
argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), tcp_socket=TCPSocketAction( host="host_example", @@ -30858,7 +31224,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30878,7 +31244,7 @@ with argo_workflows.ApiClient(configuration) as api_client: host_ip="host_ip_example", host_port=1, name="name_example", - protocol="SCTP", + protocol="protocol_example", ), ], readiness_probe=Probe( @@ -30902,7 +31268,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -30914,7 +31280,18 @@ with argo_workflows.ApiClient(configuration) as api_client: termination_grace_period_seconds=1, timeout_seconds=1, ), + resize_policy=[ + ContainerResizePolicy( + resource_name="resource_name_example", + restart_policy="restart_policy_example", + ), + ], resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -30946,7 +31323,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ), seccomp_profile=SeccompProfile( localhost_profile="localhost_profile_example", - type="Localhost", + type="type_example", ), windows_options=WindowsSecurityContextOptions( gmsa_credential_spec="gmsa_credential_spec_example", @@ -30976,7 +31353,7 @@ with argo_workflows.ApiClient(configuration) as api_client: ], path="path_example", port="port_example", - scheme="HTTP", + scheme="scheme_example", ), initial_delay_seconds=1, period_seconds=1, @@ -31617,9 +31994,9 @@ with argo_workflows.ApiClient(configuration) as api_client: timeout="timeout_example", tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", 
key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -31716,7 +32093,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -31764,12 +32140,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -31999,9 +32381,9 @@ with argo_workflows.ApiClient(configuration) as api_client: ], tolerations=[ Toleration( - effect="NoExecute", + effect="effect_example", key="key_example", - operator="Equal", + operator="operator_example", toleration_seconds=1, value="value_example", ), @@ -32022,7 +32404,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32070,12 +32451,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, @@ -32118,10 +32505,10 @@ with 
argo_workflows.ApiClient(configuration) as api_client: message="message_example", reason="reason_example", status="status_example", - type="FileSystemResizePending", + type="type_example", ), ], - phase="Bound", + phase="phase_example", resize_status="resize_status_example", ), ), @@ -32218,7 +32605,6 @@ with argo_workflows.ApiClient(configuration) as api_client: annotations={ "key": "key_example", }, - cluster_name="cluster_name_example", creation_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), deletion_grace_period_seconds=1, deletion_timestamp=dateutil_parser('1970-01-01T00:00:00.00Z'), @@ -32266,12 +32652,18 @@ with argo_workflows.ApiClient(configuration) as api_client: kind="kind_example", name="name_example", ), - data_source_ref=TypedLocalObjectReference( + data_source_ref=TypedObjectReference( api_group="api_group_example", kind="kind_example", name="name_example", + namespace="namespace_example", ), resources=ResourceRequirements( + claims=[ + ResourceClaim( + name="name_example", + ), + ], limits={ "key": "key_example", }, diff --git a/util/wait/backoff_test.go b/util/wait/backoff_test.go index b585d7418700..f5c6940f1bbd 100644 --- a/util/wait/backoff_test.go +++ b/util/wait/backoff_test.go @@ -25,7 +25,8 @@ func TestExponentialBackoff2(t *testing.T) { err := Backoff(wait.Backoff{Steps: 1}, func() (bool, error) { return false, nil }) - assert.Equal(t, err, wait.ErrWaitTimeout) + assert.Equal(t, err, wait.ErrorInterrupted(err)) + }) t.Run("TimeoutError", func(t *testing.T) { err := Backoff(wait.Backoff{Steps: 1}, func() (bool, error) { diff --git a/workflow/executor/resource.go b/workflow/executor/resource.go index c6bd291d90cf..027bc73b05eb 100644 --- a/workflow/executor/resource.go +++ b/workflow/executor/resource.go @@ -200,8 +200,9 @@ func (we *WorkflowExecutor) WaitResource(ctx context.Context, resourceNamespace, log.Infof("Failing for conditions: %s", failSelector) failReqs, _ = failSelector.Requirements() } - err := 
wait.PollImmediateInfinite(envutil.LookupEnvDurationOr("RESOURCE_STATE_CHECK_INTERVAL", time.Second*5), - func() (bool, error) { + + err := wait.PollUntilContextCancel(ctx, envutil.LookupEnvDurationOr("RESOURCE_STATE_CHECK_INTERVAL", time.Second*5), true, + func(ctx context.Context) (bool, error) { isErrRetryable, err := we.checkResourceState(ctx, selfLink, successReqs, failReqs) if err == nil { log.Infof("Returning from successful wait for resource %s in namespace %s", resourceName, resourceNamespace) @@ -216,7 +217,7 @@ func (we *WorkflowExecutor) WaitResource(ctx context.Context, resourceNamespace, return false, err }) if err != nil { - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { log.Warnf("Waiting for resource %s resulted in timeout due to repeated errors", resourceName) } else { log.Warnf("Waiting for resource %s resulted in error %v", resourceName, err)