From 0e796b41993e46b3ab5310b1738c5ab4ced6d637 Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Thu, 14 Sep 2023 19:51:56 -0400 Subject: [PATCH 01/24] Add aws out of tree cloud provider install/upgrade docs --- .../set-up-cloud-providers/amazon.md | 162 ++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 5943ee01710e..45c50e98dfa9 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -159,6 +159,168 @@ Setting the value of the tag to `owned` tells the cluster that all resources wit **Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. +:::note + +Do not tag a resource with multiple owned or shared tags. + +::: + ### Using Amazon Elastic Container Registry (ECR) The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. + +### Using Out of Tree AWS Cloud Provider for RKE2 + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +:::note + +This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component: + +::: + + - Etcd + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/etcd-role=true +``` + + - Control Plane + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/control-plane-role=true +``` + + - Worker + +``` + +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/worker-role=true +``` +2. Select `Aws` if relying on the above mechanism to set the provider ID. +Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. +3. 
Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + +### Migrating to Out of Tree AWS Cloud Provider for RKE2 + +In order to upgrade existing cluster with in-tree cloud provider to AWS cloud controller manager, you can run stop kube controller manager and install AWS cloud controller manager in many ways. Refer to [External cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +When downtime is acceptable, you can switch to external cloud provider which removes in-tree components and then deploy charts to install AWS cloud controller manager as explained in [using out of tree cloud provider](#using-out-of-tree-aws-cloud-provider). + +When control plane cannot tolerate downtime, leader migration must be enabled to facilitate a smooth migration from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +**Important** +- [Cloud controller migration docs](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of Kubernetes upgrade. + +- Refer [Migrate to use cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to confirm if any customizations are required before migrating. + Confirm [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration) and special case around [migrating IPAM controllers](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). + + +1. Update cluster config to enable leader migration + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration=true +``` + +Note that cloud provider is still `aws` at this step. + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +2. Cordon control plane nodes so aws cloud controller pods run on nodes onlyafter upgrading to external cloud provider. + +3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for deploying cloud controller manager chart. + +Update container args to enable leader migration, +``` +- '--enable-leader-migration=true' +``` + +4. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully. + +5. Update provisioning cluster to change cloud provider and remove leader migration args from kube-controller. + +If upgrading Kubernetes version, set Kubernetes version as well. 
+ +``` +cloud_provider: + name: external +``` +Remove `enable-leader-migration` from: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration=true +``` + +6. Optionally, AWS cloud controller manager can be updated to disable leader migration. Upgrade the chart and remove following section from container args: +``` +- --enable-leader-migration=true +``` From 19b92d3f12fe00d792a8b909a1992697fe897302 Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Thu, 14 Sep 2023 19:51:56 -0400 Subject: [PATCH 02/24] Add aws out of tree cloud provider install/upgrade docs --- .../set-up-cloud-providers/amazon.md | 162 ++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 5943ee01710e..45c50e98dfa9 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -159,6 +159,168 @@ Setting the value of the tag to `owned` tells the cluster that all resources wit **Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. +:::note + +Do not tag a resource with multiple owned or shared tags. + +::: + ### Using Amazon Elastic Container Registry (ECR) The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. + +### Using Out of Tree AWS Cloud Provider for RKE2 + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +:::note + +This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component: + +::: + + - Etcd + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/etcd-role=true +``` + + - Control Plane + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/control-plane-role=true +``` + + - Worker + +``` + +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/worker-role=true +``` +2. Select `Aws` if relying on the above mechanism to set the provider ID. 
+Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. +3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + +### Migrating to Out of Tree AWS Cloud Provider for RKE2 + +In order to upgrade existing cluster with in-tree cloud provider to AWS cloud controller manager, you can run stop kube controller manager and install AWS cloud controller manager in many ways. Refer to [External cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +When downtime is acceptable, you can switch to external cloud provider which removes in-tree components and then deploy charts to install AWS cloud controller manager as explained in [using out of tree cloud provider](#using-out-of-tree-aws-cloud-provider). + +When control plane cannot tolerate downtime, leader migration must be enabled to facilitate a smooth migration from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +**Important** +- [Cloud controller migration docs](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of Kubernetes upgrade. + +- Refer [Migrate to use cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to confirm if any customizations are required before migrating. + Confirm [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration) and special case around [migrating IPAM controllers](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). + + +1. Update cluster config to enable leader migration + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration=true +``` + +Note that cloud provider is still `aws` at this step. + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +2. Cordon control plane nodes so aws cloud controller pods run on nodes onlyafter upgrading to external cloud provider. + +3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for deploying cloud controller manager chart. + +Update container args to enable leader migration, +``` +- '--enable-leader-migration=true' +``` + +4. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully. + +5. Update provisioning cluster to change cloud provider and remove leader migration args from kube-controller. + +If upgrading Kubernetes version, set Kubernetes version as well. 
+ +``` +cloud_provider: + name: external +``` +Remove `enable-leader-migration` from: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration=true +``` + +6. Optionally, AWS cloud controller manager can be updated to disable leader migration. Upgrade the chart and remove following section from container args: +``` +- --enable-leader-migration=true +``` From 4ad30b45e7d6e3aff48452c982ca1f236bee7102 Mon Sep 17 00:00:00 2001 From: Kinara Shah Date: Wed, 6 Sep 2023 18:27:10 -0700 Subject: [PATCH 03/24] Add info for aws cloud provider --- .../set-up-cloud-providers/amazon.md | 324 +++++++++++++----- 1 file changed, 235 insertions(+), 89 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 45c50e98dfa9..369af26a56da 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,6 +21,10 @@ To set up the Amazon cloud provider, :::note Important: +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. The steps listed below are still required to set up an Amazon cloud provider. You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. + +You can also [migrate from an in-tree to out-of-tree AWS cloud provider](#migrating-to-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to 1.27 in order to stay functional. + Starting with Kubernetes 1.23, you have to deactivate the `CSIMigrationAWS` feature gate in order to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. 
::: @@ -40,71 +44,71 @@ IAM Policy for nodes with the `controlplane` role: ```json { -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + 
"elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] } ``` @@ -112,24 +116,24 @@ IAM policy for nodes with the `etcd` or `worker` role: ```json { -"Version": "2012-10-17", -"Statement": [ + "Version": "2012-10-17", + "Statement": [ { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" } -] + ] } ``` @@ -161,7 +165,7 @@ Setting the value of the tag to `owned` tells the cluster that all resources wit :::note -Do not tag a resource with multiple owned or shared tags. +Do not tag a resource with multiple owned or shared tags. ::: @@ -169,9 +173,151 @@ Do not tag a resource with multiple owned or shared tags. The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. -### Using Out of Tree AWS Cloud Provider for RKE2 +### Using the Out-of-Tree AWS Cloud Provider for RKE1 + +1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. + +> When IP-based naming is used, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2..internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention. 
+ +When creating a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), you must add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` -- for example, `"$(hostname -f)"`). This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. + +2. Select **External (out-of-tree)** as the cloud provider. This sets `--cloud-provider=external` for Kubernetes components. This can also be done manually, by setting it in `cluster.yml`. + +``` +rancher_kubernetes_engine_config: + cloud_provider: + name: external +``` + +3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via Helm charts in UI or manually. + +:::note + +The upstream documentation for the AWS cloud controller manager can be found [here](https://kubernetes.github.io/cloud-provider-aws). + +::: + +### Helm Chart Installation + +1. Click **☰**, then select the name of the cluster from the left navigation. + +2. Select **Apps** > **Repositories**. + +3. Click the **Create** button**. + +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. + +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. + +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. + +7. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: + +``` +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane + +``` + +:::note + +There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) which doesn't allow nodeSelector to be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: + +``` +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +::: + +8. Add `get` to `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. + +``` + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get +``` + +9. Update container arguments: +``` + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` + +10. Install the chart and confirm that daemonset `aws-cloud-controller-manager` deploys successfully. + +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 + +In order to upgrade existing cluster with in-tree cloud provider to AWS cloud controller manager, you can run stop kube controller manager and install AWS cloud controller manager in many ways. Refer to [External cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +When downtime is acceptable, you can switch to external cloud provider which removes in-tree components and then deploy charts to install AWS cloud controller manager as explained in [using out of tree cloud provider](#using-out-of-tree-aws-cloud-provider). 
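
If you take this route, note that Kubernetes `Service` load balancers are not reconciled during the window in which no cloud controller is running (existing load balancers keep serving traffic, but changes are not processed until the AWS cloud controller manager is up). A hedged way to list the Services this affects, using plain `kubectl`:

```bash
# Services whose load balancers are managed by the cloud provider
kubectl get services --all-namespaces | grep LoadBalancer
```
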
+ +When control plane cannot tolerate downtime, leader migration must be enabled to facilitate a smooth migration from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +**Important** +- [Cloud controller migration docs](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of Kubernetes upgrade. + +- Refer [Migrate to use cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to confirm if any customizations are required before migrating. + Confirm [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration) and special case around [migrating IPAM controllers](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). + + +1. Update cluster config to enable leader migration in `cluster.yml` + +``` +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` +Note that cloud provider is still `aws` at this step. +``` +cloud_provider: + name: aws +``` + +2. Cordon control plane nodes so aws cloud controller pods run on nodes onlyafter upgrading to external cloud provider. + +3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-6 for [deploying cloud controller manager chart](#helm-chart-install) . + +Update container args to enable leader migration, +``` +- '--enable-leader-migration=true' +``` + +4. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully. + +5. Update cluster.yml to change cloud provider and remove leader migration args from kube-controller. + +If upgrading Kubernetes version, set Kubernetes version as well. + +``` +cloud_provider: + name: external +``` +Remove `enable-leader-migration` from: +``` +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +6. Optionally, AWS cloud controller manager can be updated to disable leader migration. Upgrade the chart and remove following section from container args: +``` +- --enable-leader-migration=true +``` +### Using the Out-of-Tree AWS Cloud Provider for RKE2 -1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. 2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: ```yaml @@ -187,7 +333,7 @@ This option will be passed to the configuration of the various kubernetes compon ::: - - Etcd +- Etcd ```yaml spec: @@ -199,7 +345,7 @@ spec: machineLabelSelector: rke.cattle.io/etcd-role=true ``` - - Control Plane +- Control Plane ```yaml spec: @@ -214,7 +360,7 @@ spec: machineLabelSelector: rke.cattle.io/control-plane-role=true ``` - - Worker +- Worker ``` @@ -228,7 +374,7 @@ spec: machineLabelSelector: rke.cattle.io/worker-role=true ``` 2. 
Select `Aws` if relying on the above mechanism to set the provider ID. -Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. 3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: ```yaml @@ -255,7 +401,7 @@ spec: - --cloud-provider=aws ``` -### Migrating to Out of Tree AWS Cloud Provider for RKE2 +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 In order to upgrade existing cluster with in-tree cloud provider to AWS cloud controller manager, you can run stop kube controller manager and install AWS cloud controller manager in many ways. Refer to [External cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. From 6b0ecccd2286a226fc18aea800b4e5a50b4b9630 Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Fri, 22 Sep 2023 17:04:19 -0400 Subject: [PATCH 04/24] indentation fix --- .../set-up-cloud-providers/amazon.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 369af26a56da..552d83970062 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -342,7 +342,7 @@ spec: - config: kubelet-arg: - cloud-provider=external - machineLabelSelector: rke.cattle.io/etcd-role=true + machineLabelSelector: rke.cattle.io/etcd-role=true ``` - Control Plane @@ -357,22 +357,21 @@ spec: - cloud-provider=external kube-controller-manager-arg: - cloud-provider=external - machineLabelSelector: rke.cattle.io/control-plane-role=true + machineLabelSelector: rke.cattle.io/control-plane-role=true ``` - Worker -``` - +```yaml spec: rkeConfig: machineSelectorConfig: - config: - disable-cloud-controller: true kubelet-arg: - cloud-provider=external - machineLabelSelector: rke.cattle.io/worker-role=true + machineLabelSelector: rke.cattle.io/worker-role=true ``` + 2. Select `Aws` if relying on the above mechanism to set the provider ID. Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. 3. 
Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: From 49045e264c309099bae041b098f7f07d66622fee Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Thu, 28 Sep 2023 12:05:03 -0400 Subject: [PATCH 05/24] Address review comments --- .../set-up-cloud-providers/amazon.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 552d83970062..f941a584f91a 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,7 +21,7 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. The steps listed below are still required to set up an Amazon cloud provider. You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree Cloud Providers have been removed completely, and will no longer continue to function post-upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. You can also [migrate from an in-tree to out-of-tree AWS cloud provider](#migrating-to-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to 1.27 in order to stay functional. @@ -469,3 +469,11 @@ spec: ``` - --enable-leader-migration=true ``` + +7. The Cloud Provider is responsible for setting the ProviderID of the node on successful + +Check if all nodes are initialized with the ProviderID with the following command: + +``` +kubectl describe nodes | grep "ProviderID" +``` From dbcf171a3210b720f16410ff0fe35e1dbb829c08 Mon Sep 17 00:00:00 2001 From: Kinara Shah Date: Sun, 22 Oct 2023 15:28:00 -0700 Subject: [PATCH 06/24] addressing review comments --- .../set-up-cloud-providers/amazon.md | 122 ++++++++++++------ 1 file changed, 81 insertions(+), 41 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index f941a584f91a..8f8f9e9cdfb9 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -175,21 +175,41 @@ The kubelet component has the ability to automatically obtain ECR credentials, w ### Using the Out-of-Tree AWS Cloud Provider for RKE1 -1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. +1. 
[Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. > When IP-based naming is used, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2..internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention. -When creating a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), you must add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` -- for example, `"$(hostname -f)"`). This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. +To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: -2. Select **External (out-of-tree)** as the cloud provider. This sets `--cloud-provider=external` for Kubernetes components. This can also be done manually, by setting it in `cluster.yml`. +``` +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true +``` + +You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), you can add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` - for example, `"$(hostname -f)"`). This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. + +2. Select cloud provider. + +Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and enable `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. + +::: note + +You must disable `useInstanceMetadataHostname` when setting custom node name via `node-name` for custom clusters. + +::: ``` rancher_kubernetes_engine_config: cloud_provider: - name: external + name: external-aws + useInstanceMetadataHostname: true/false ``` -3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via Helm charts in UI or manually. +Existing clusters using **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but not facilitate setting node name. + +3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. 
This can be done via [Helm charts in UI](#helm-chart-installation) or manually. :::note @@ -203,7 +223,7 @@ The upstream documentation for the AWS cloud controller manager can be found [he 2. Select **Apps** > **Repositories**. -3. Click the **Create** button**. +3. Click the **Create** button. 4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. @@ -211,7 +231,26 @@ The upstream documentation for the AWS cloud controller manager can be found [he 6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. -7. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: +7. Add the following container args: + +``` + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` + +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. + +``` + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get +``` + +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: ``` tolerations: @@ -224,36 +263,22 @@ tolerations: ``` -:::note - -There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) which doesn't allow nodeSelector to be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: - ``` nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` +:::note for Rancher ` Date: Mon, 13 Nov 2023 15:16:16 -0500 Subject: [PATCH 07/24] Address review comments --- .../set-up-cloud-providers/amazon.md | 60 ++++++++++--------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 8f8f9e9cdfb9..bf511d4ecacc 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -188,15 +188,15 @@ rancher_kubernetes_engine_config: useInstanceMetadataHostname: true ``` -You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), you can add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` - for example, `"$(hostname -f)"`). This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. +You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` —for example, `"$(hostname -f)"`). 
This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. 2. Select cloud provider. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and enable `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. +Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and enable `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. ::: note -You must disable `useInstanceMetadataHostname` when setting custom node name via `node-name` for custom clusters. +You must disable `useInstanceMetadataHostname` when setting a custom node name via `node-name` for custom clusters. ::: @@ -231,7 +231,7 @@ The upstream documentation for the AWS cloud controller manager can be found [he 6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. -7. Add the following container args: +7. Add the following container arguments: ``` - '--use-service-account-credentials=true' @@ -268,34 +268,36 @@ nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` -:::note for Rancher ` Date: Wed, 15 Nov 2023 13:52:54 -0500 Subject: [PATCH 08/24] syntax annotations, re-org sections, copy edits --- .../set-up-cloud-providers/amazon.md | 276 +++++++++--------- 1 file changed, 134 insertions(+), 142 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index d2ad0ec96085..537f8b1b0b4f 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -181,7 +181,7 @@ The kubelet component has the ability to automatically obtain ECR credentials, w To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: -``` +```yaml rancher_kubernetes_engine_config: cloud_provider: name: external-aws @@ -200,7 +200,7 @@ You must disable `useInstanceMetadataHostname` when setting a custom node name v ::: -``` +```yaml rancher_kubernetes_engine_config: cloud_provider: name: external-aws @@ -217,6 +217,94 @@ The upstream documentation for the AWS cloud controller manager can be found [he ::: +### Using Out-of-tree AWS Cloud Provider for RKE2 + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. +2. 
Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +:::note + +This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component: + +::: + +- Etcd + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/etcd-role:true +``` + +- Control Plane + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/control-plane-role:true +``` + +- Worker + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: rke.cattle.io/worker-role:true +``` + +2. Select `Amazon` if relying on the above mechanism to set the provider ID. + Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + +3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + ### Helm Chart Installation 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -233,14 +321,14 @@ The upstream documentation for the AWS cloud controller manager can be found [he 7. Add the following container arguments: -``` +```yaml - '--use-service-account-credentials=true' - '--configure-cloud-routes=false' ``` 8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. -``` +```yaml - apiGroups: - '' resources: @@ -252,7 +340,7 @@ The upstream documentation for the AWS cloud controller manager can be found [he 9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: -``` +```yaml tolerations: - effect: NoSchedule key: node.cloudprovider.kubernetes.io/uninitialized @@ -263,7 +351,7 @@ tolerations: ``` -``` +```yaml nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` @@ -272,7 +360,7 @@ nodeSelector: There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: -``` +```yaml nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` @@ -281,8 +369,7 @@ nodeSelector: 10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` is running. 
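
One way to confirm this from the command line, assuming the daemonset name and the `kube-system` namespace used above:

```bash
# Wait for the daemonset rollout to finish, then check that desired and ready counts match
kubectl -n kube-system rollout status daemonset aws-cloud-controller-manager
kubectl -n kube-system get daemonset aws-cloud-controller-manager
```
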
Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 - +### Migrating to the Out-of-Tree AWS Cloud Provider To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. @@ -290,16 +377,15 @@ If it's acceptable to have some downtime, you can [switch to an external cloud p If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. -**Important** -- The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. - -- Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. - Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). +:::note Important +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). +::: +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 1. Update the cluster config to enable leader migration in `cluster.yml` -``` +```yaml services: kube-controller: extra_args: @@ -308,39 +394,39 @@ services: Note that the cloud provider is still `aws` at this step. -``` +```yaml cloud_provider: name: aws ``` -2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. +2. 
Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: -``` +```bash kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. -To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation): +3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation): -``` +```bash - '--enable-leader-migration=true' ``` -4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. +4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule `aws-controller-manager` pods. 5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not providing custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: -``` +```yaml rancher_kubernetes_engine_config: cloud_provider: name: external-aws useInstanceMetadataHostname: true/false ``` -**Remove** `enable-leader-migration` from: -``` +Remove `enable-leader-migration`: + +```yaml services: kube-controller: extra_args: @@ -352,114 +438,14 @@ services: 7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. 8. (Optional) After the upgrade, you can update the AWS cloud controller manager to disable leader migration. Upgrade the chart and remove the following section from the container arguments: -``` -- --enable-leader-migration=true -``` - -### Using Out-of-tree AWS Cloud Provider for RKE2 - -1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. -2. 
Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: - -```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: aws -``` - -:::note - -This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component: - -::: - -- Etcd - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: rke.cattle.io/etcd-role:true -``` - -- Control Plane - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - disable-cloud-controller: true - kube-apiserver-arg: - - cloud-provider=external - kube-controller-manager-arg: - - cloud-provider=external - kubelet-arg: - - cloud-provider=external - machineLabelSelector: rke.cattle.io/control-plane-role:true -``` - -- Worker - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: rke.cattle.io/worker-role:true -``` - -2. Select `Amazon` if relying on the above mechanism to set the provider ID. - Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. - -3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: ```yaml -spec: - rkeConfig: - additionalManifest: |- - apiVersion: helm.cattle.io/v1 - kind: HelmChart - metadata: - name: aws-cloud-controller-manager - namespace: kube-system - spec: - chart: aws-cloud-controller-manager - repo: https://kubernetes.github.io/cloud-provider-aws - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - hostNetworking: true - nodeSelector: - node-role.kubernetes.io/control-plane: "true" - args: - - --configure-cloud-routes=false - - --v=5 - - --cloud-provider=aws +- --enable-leader-migration=true ``` -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 - -In order to upgrade existing cluster with in-tree cloud provider to AWS cloud controller manager, you can run stop kube controller manager and install AWS cloud controller manager in many ways. Refer to [External cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. - -When downtime is acceptable, you can switch to external cloud provider which removes in-tree components and then deploy charts to install AWS cloud controller manager as explained in [using out of tree cloud provider](#using-out-of-tree-aws-cloud-provider). - -When control plane cannot tolerate downtime, leader migration must be enabled to facilitate a smooth migration from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. - -**Important** -- [Cloud controller migration docs](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of Kubernetes upgrade. +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 -- Refer [Migrate to use cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to confirm if any customizations are required before migrating. 
- Confirm [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration) and special case around [migrating IPAM controllers](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration).
-
-
-1. Update cluster config to enable leader migration
+1. Update the cluster config to enable leader migration:

```yaml
spec:
  rkeConfig:
    machineSelectorConfig:
      - config:
          kube-controller-manager-arg:
            - enable-leader-migration=true
```

-Note that cloud provider is still `aws` at this step.
+Note that the cloud provider is still `aws` at this step:

```yaml
spec:
  rkeConfig:
    machineGlobalConfig:
      cloud-provider-name: aws
```

-2. Cordon control plane nodes so aws cloud controller pods run on nodes onlyafter upgrading to external cloud provider.
+2. Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider:

-3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2)
+```bash
+kubectl cordon -l "node-role.kubernetes.io/controlplane=true"
+```

-Update container args to enable leader migration,
+3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2).

-```
+Update container args to enable leader migration:
+
+```bash
- '--enable-leader-migration=true'
```

-4. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully.
+4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deploys.

-5. Update provisioning cluster to change cloud provider and remove leader migration args from kube-controller.
+5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller.

-If upgrading Kubernetes version, set Kubernetes version as well.
+If upgrading the Kubernetes version, set the Kubernetes version as well.

-```
+```yaml
cloud_provider:
  name: external
```

-Remove `enable-leader-migration` from:
+
+Remove `enable-leader-migration`:

```yaml
spec:
  rkeConfig:
    machineSelectorConfig:
      - config:
          kube-controller-manager-arg:
            - enable-leader-migration=true
```

6. (Optional) After the upgrade, you can update the AWS cloud controller manager to disable leader migration. Upgrade the chart and remove the following section from the container arguments:
-```
+
+```yaml
- --enable-leader-migration=true
```

-7. The Cloud Provider is responsible for setting the ProviderID of the node on successful
+7. The Cloud Provider is responsible for setting the ProviderID of the node. 
-Check if all nodes are initialized with the ProviderID with the following command:
+Check if all nodes are initialized with the ProviderID:

-```
+```bash
kubectl describe nodes | grep "ProviderID"
```

From 8b25939cd3335d7a7bf7709a78720a301cae9e7d Mon Sep 17 00:00:00 2001
From: martyav
Date: Wed, 15 Nov 2023 14:11:14 -0500
Subject: [PATCH 09/24] even more copy edits

---
 .../set-up-cloud-providers/amazon.md          | 28 +++++++++----------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
index 537f8b1b0b4f..fbea465f161d 100644
--- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
+++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md
@@ -7,12 +7,12 @@ weight: 1

-When using the `Amazon` cloud provider, you can leverage the following capabilities:
+When you use Amazon as a cloud provider, you can leverage the following capabilities:

-- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`.
-- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes.
+- **Load Balancers:** Launch an AWS Elastic Load Balancer (ELB) when you select `Layer-4 Load Balancer` in **Port Mapping** or when you launch a `Service` with `type: LoadBalancer`.
+- **Persistent Volumes**: Use AWS Elastic Block Stores (EBS) for persistent volumes.

-See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider.
+See the [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for more information about the Amazon cloud provider.

To set up the Amazon cloud provider,

@@ -21,9 +21,9 @@ To set up the Amazon cloud provider,

:::note Important:

-In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree Cloud Providers have been removed completely, and will no longer continue to function post-upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID.
+In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree Cloud Providers have been removed completely, and will no longer continue to function post-upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID.

-You can also [migrate from an in-tree to out-of-tree AWS cloud provider](#migrating-to-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to 1.27 in order to stay functional.
+You can also [migrate from an in-tree to out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to 1.27 in order to stay functional. 
Starting with Kubernetes 1.23, you have to deactivate the `CSIMigrationAWS` feature gate in order to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. @@ -207,7 +207,7 @@ rancher_kubernetes_engine_config: useInstanceMetadataHostname: true/false ``` -Existing clusters using **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but not facilitate setting node name. +Existing clusters using **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name. 3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via [Helm charts in UI](#helm-chart-installation) or manually. @@ -217,7 +217,7 @@ The upstream documentation for the AWS cloud controller manager can be found [he ::: -### Using Out-of-tree AWS Cloud Provider for RKE2 +### Using the Out-of-tree AWS Cloud Provider for RKE2 1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. 2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: @@ -373,11 +373,11 @@ nodeSelector: To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. -If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. +If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-the-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. -:::note Important +:::note Important: The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. 
Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). ::: @@ -415,7 +415,7 @@ kubectl cordon -l "node-role.kubernetes.io/controlplane=true" 5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not providing custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: +Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` queries the EC2 metadata service and sets `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: ```yaml rancher_kubernetes_engine_config: @@ -471,7 +471,7 @@ spec: kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2). +3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). Update container args to enable leader migration: @@ -507,9 +507,7 @@ spec: - --enable-leader-migration=true ``` -7. The Cloud Provider is responsible for setting the ProviderID of the node. - -Check if all nodes are initialized with the ProviderID: +7. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: ```bash kubectl describe nodes | grep "ProviderID" From e1b50ca724959ec2b9ba24a729b503c1128dc7cd Mon Sep 17 00:00:00 2001 From: martyav Date: Wed, 15 Nov 2023 14:19:31 -0500 Subject: [PATCH 10/24] copy edits to note at top --- .../set-up-cloud-providers/amazon.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index fbea465f161d..9e8c93f08ca2 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,11 +21,11 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree Cloud Providers have been removed completely, and will no longer continue to function post-upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. 
You can proceed to [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been removed completely, and won't work after you upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. -You can also [migrate from an in-tree to out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to 1.27 in order to stay functional. +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. -Starting with Kubernetes 1.23, you have to deactivate the `CSIMigrationAWS` feature gate in order to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. +Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. ::: From 2d89087244338a88568d5b018a3350ca8e59e19d Mon Sep 17 00:00:00 2001 From: martyav Date: Wed, 15 Nov 2023 14:47:05 -0500 Subject: [PATCH 11/24] addressing suggestions from slickwarren --- .../set-up-cloud-providers/amazon.md | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 9e8c93f08ca2..ffe70a86f544 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -424,7 +424,7 @@ rancher_kubernetes_engine_config: useInstanceMetadataHostname: true/false ``` -Remove `enable-leader-migration`: +Remove `enable-leader-migration` if you don't want it enabled in your cluster: ```yaml services: @@ -433,16 +433,19 @@ services: enable-leader-migration: "true" ``` -6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. - -7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. - -8. (Optional) After the upgrade, you can update the AWS cloud controller manager to disable leader migration. Upgrade the chart and remove the following section from the container arguments: +:::tip +You can also disable leader migration after step 7. Upgrade the chart and remove the following section from the container arguments: ```yaml - --enable-leader-migration=true ``` +::: + +6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. + +7. 
Update the cluster. The `aws-cloud-controller-manager` pods should now be running. + #### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 1. Update the cluster config to enable leader migration: @@ -471,9 +474,7 @@ spec: kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). - -Update container args to enable leader migration: +3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). In step 3 of the fresh install steps, add the following container arg to the `additionalManifest` to enable leader migration: ```bash - '--enable-leader-migration=true' @@ -481,16 +482,14 @@ Update container args to enable leader migration: 4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deploys. -5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller. - -If upgrading the Kubernetes version, set the Kubernetes version as well. +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller. If upgrading the Kubernetes version, set the Kubernetes version as well in the `machineSelectorConfig` section of the cluster YAML file: ```yaml cloud_provider: name: external ``` -Remove `enable-leader-migration`: +Remove `enable-leader-migration` if you don't want it enabled in your cluster: ```yaml spec: @@ -501,13 +500,16 @@ spec: - enable-leader-migration=true ``` -6. (Optional) After the upgrade, you can update the AWS cloud controller manager to disable leader migration. Upgrade the chart and remove the following section from the container arguments: +:::tip +You can also disable leader migration after the upgrade. Upgrade the chart and remove the following section from the container arguments: ```yaml - --enable-leader-migration=true ``` -7. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: +::: + +6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: ```bash kubectl describe nodes | grep "ProviderID" From ccaa1b1ad2a0acef83414d754c8790e73759479c Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Fri, 17 Nov 2023 18:10:16 -0500 Subject: [PATCH 12/24] Address review comments --- .../set-up-cloud-providers/amazon.md | 522 +++++++++++++++++- .../migrate-from-in-tree-to-out-of-tree.md | 4 +- 2 files changed, 497 insertions(+), 29 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index ffe70a86f544..ff667fa43dbb 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,7 +21,7 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. 
In-tree cloud providers have been removed completely, and won't work after you upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. @@ -209,7 +209,7 @@ rancher_kubernetes_engine_config: Existing clusters using **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name. -3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via [Helm charts in UI](#helm-chart-installation) or manually. +3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via [Helm charts in UI](#helm-chart-installation-from-ui) or manually. :::note @@ -217,7 +217,290 @@ The upstream documentation for the AWS cloud controller manager can be found [he ::: -### Using the Out-of-tree AWS Cloud Provider for RKE2 +### Helm Chart Installation from CLI + +:::tip + +Official upstream docs can be found [here](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager). + +1. Add the helm repository: + +```shell +helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws +helm repo update +``` + +2. 
Create a `values.yaml` file to override default `values.yaml` with the following contents: + +```yaml +# values.yaml +hostNetworking: true +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +args: + - --configure-cloud-routes=false + - --use-service-account-credentials=true + - --v=2 + - --cloud-provider=aws +clusterRoleRules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +``` + +3. Install the helm chart: + +```shell +helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +``` + +Verify the helm chart installed successfully with the following command: + +```shell +helm status -n kube-system aws-cloud-controller-manager +``` + +4. (Optional) Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Helm Chart Installation from UI + +1. Click **☰**, then select the name of the cluster from the left navigation. + +2. Select **Apps** > **Repositories**. + +3. Click the **Create** button. + +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. + +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. + +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. + +7. Add the following container arguments: + +``` + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` + +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. + +``` + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get +``` + +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: + +``` +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane + +``` + +``` +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +:::note + +There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. 
Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: + +``` +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +::: + +10. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 + + +To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + +If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +**Important** +- The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. + +- Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. + Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). + + +1. Update the cluster config to enable leader migration in `cluster.yml` + +``` +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +Note that the cloud provider is still `aws` at this step. + +``` +cloud_provider: + name: aws +``` + +2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. + +``` +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. +To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): + +``` +- '--enable-leader-migration=true' +``` + +4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. + +5. 
Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. + +Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not providing custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: + +``` +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true/false +``` + +**Remove** `enable-leader-migration` from: +``` +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. + +7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. + +8. (Optional) After the upgrade, leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: + +``` +- --enable-leader-migration=true +``` + +Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Using Out-of-tree AWS Cloud Provider for RKE2 1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. 2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: @@ -231,7 +514,7 @@ spec: :::note -This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component: +This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: ::: @@ -244,7 +527,12 @@ spec: - config: kubelet-arg: - cloud-provider=external - machineLabelSelector: rke.cattle.io/etcd-role:true + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/etcd-role + operator: In + values: + - 'true' ``` - Control Plane @@ -254,14 +542,19 @@ spec: rkeConfig: machineSelectorConfig: - config: - disable-cloud-controller: true - kube-apiserver-arg: - - cloud-provider=external - kube-controller-manager-arg: - - cloud-provider=external - kubelet-arg: - - cloud-provider=external - machineLabelSelector: rke.cattle.io/control-plane-role:true + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' ``` - Worker @@ -273,11 +566,16 @@ spec: - config: kubelet-arg: - cloud-provider=external - machineLabelSelector: rke.cattle.io/worker-role:true + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/worker-role + operator: In + values: + - 'true' ``` 2. Select `Amazon` if relying on the above mechanism to set the provider ID. 
- Otherwise, select `External (out-of-tree)` cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. 3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: @@ -305,7 +603,140 @@ spec: - --cloud-provider=aws ``` -### Helm Chart Installation +### Helm Chart Installation from CLI + +:::tip + +Official upstream docs can be found [here](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager). + +1. Add the helm repository: + +```shell +helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws +helm repo update +``` + +2. Create a `values.yaml` file to override default `values.yaml` with the following contents: + +```yaml +# values.yaml +hostNetworking: true +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +args: + - --configure-cloud-routes=false + - --use-service-account-credentials=true + - --v=2 + - --cloud-provider=aws +clusterRoleRules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +``` + +3. Install the helm chart: + +```shell +helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +``` + +Verify the helm chart installed successfully with the following command: + +```shell +helm status -n kube-system aws-cloud-controller-manager +``` + +4. (Optional) Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Helm Chart Installation from UI 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -405,7 +836,7 @@ cloud_provider: kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation): +3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. 
To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): ```bash - '--enable-leader-migration=true' @@ -456,7 +887,13 @@ spec: machineSelectorConfig: - config: kube-controller-manager-arg: - - enable-leader-migration=true + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' ``` Note that the cloud provider is still `aws` at this step: @@ -472,6 +909,10 @@ spec: ```bash kubectl cordon -l "node-role.kubernetes.io/controlplane=true" + +3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) +From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. +Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: ``` 3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). In step 3 of the fresh install steps, add the following container arg to the `additionalManifest` to enable leader migration: @@ -480,33 +921,60 @@ kubectl cordon -l "node-role.kubernetes.io/controlplane=true" - '--enable-leader-migration=true' ``` -4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deploys. +4. Install the chart and confirm daemonset `aws-cloud-controller-manager` successfully deploys with the following command: -5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller. If upgrading the Kubernetes version, set the Kubernetes version as well in the `machineSelectorConfig` section of the cluster YAML file: - -```yaml -cloud_provider: - name: external +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller. +If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file + +:::note Important + +Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. + +::: + Remove `enable-leader-migration` if you don't want it enabled in your cluster: +``` +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: external +``` + +Remove `enable-leader-migration` from: + ```yaml spec: rkeConfig: machineSelectorConfig: - config: kube-controller-manager-arg: - - enable-leader-migration=true + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' ``` - :::tip -You can also disable leader migration after the upgrade. Upgrade the chart and remove the following section from the container arguments: +You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. 
+Upgrade the chart and remove the following section from the container arguments: ```yaml - --enable-leader-migration=true ``` +Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + ::: 6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md index d302213118ac..3eb227d79ec9 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md @@ -64,7 +64,7 @@ Once all nodes are tainted by the running the script, launch the Helm vSphere CP 1. Click **☰ > Cluster Management**. 1. Go to the cluster where the vSphere CPI chart will be installed and click **Explore**. 1. Click **Apps > Charts**. -1. Click **vSphere CPI**.. +1. Click **vSphere CPI**. 1. Click **Install**. 1. Fill out the required vCenter details and click **Install**. @@ -81,7 +81,7 @@ kubectl describe nodes | grep "ProviderID" 1. Click **☰ > Cluster Management**. 1. Go to the cluster where the vSphere CSI chart will be installed and click **Explore**. 1. Click **Apps > Charts**. -1. Click **vSphere CSI**.. +1. Click **vSphere CSI**. 1. Click **Install**. 1. Fill out the required vCenter details and click **Install**. 1. Check **Customize Helm options before install** and click **Next**. From 5f3237156fcec87245542b3c6610f2687f1dcf72 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 21 Nov 2023 14:59:44 -0500 Subject: [PATCH 13/24] copyedits --- .../set-up-cloud-providers/amazon.md | 143 +++++++++--------- 1 file changed, 69 insertions(+), 74 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 3580dd5fb49e..697eba68736b 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -179,9 +179,13 @@ The kubelet component has the ability to automatically obtain ECR credentials, w 1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. -> When IP-based naming is used, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2..internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention. +:::note + +If you use IP-based naming, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2..internal`). 
If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention. + +::: -To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: +To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when the `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: ```yaml rancher_kubernetes_engine_config: @@ -190,15 +194,15 @@ rancher_kubernetes_engine_config: useInstanceMetadataHostname: true ``` -You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), add [`--node-name`](../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options) to the `docker run` node registration command to set `hostname-override` —for example, `"$(hostname -f)"`). This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. +You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), add [`--node-name`](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) to the `docker run` node registration command to set `hostname-override` — for example, `"$(hostname -f)"`. This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. -2. Select cloud provider. +2. Select the cloud provider. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and enable `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. +Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. ::: note -You must disable `useInstanceMetadataHostname` when setting a custom node name via `node-name` for custom clusters. +You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`. ::: @@ -209,21 +213,15 @@ rancher_kubernetes_engine_config: useInstanceMetadataHostname: true/false ``` -Existing clusters using **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name. +Existing clusters that use an **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name. -3. Install the AWS cloud controller manager after the cluster finishes provisioning. 
Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done via [Helm charts in UI](#helm-chart-installation-from-ui) or manually. +3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done manually, or via [Helm charts in UI](#helm-chart-installation-from-ui). -:::note - -The upstream documentation for the AWS cloud controller manager can be found [here](https://kubernetes.github.io/cloud-provider-aws). - -::: +Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). ### Helm Chart Installation from CLI -:::tip - -Official upstream docs can be found [here](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager). +Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. 1. Add the helm repository: @@ -232,7 +230,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file to override default `values.yaml` with the following contents: +2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`: ```yaml # values.yaml @@ -340,13 +338,13 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify the helm chart installed successfully with the following command: +Verify that the helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager ``` -4. (Optional) Verify the cloud controller manager update was successfully rolled out with the following command: +4. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager @@ -368,14 +366,14 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager 7. Add the following container arguments: -``` +```yaml - '--use-service-account-credentials=true' - '--configure-cloud-routes=false' ``` -8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: -``` +```yaml - apiGroups: - '' resources: @@ -385,9 +383,9 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager - get ``` -9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. 
Update tolerations and the nodeSelector: -``` +```yaml tolerations: - effect: NoSchedule key: node.cloudprovider.kubernetes.io/uninitialized @@ -398,23 +396,23 @@ tolerations: ``` -``` +```yaml nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` :::note -There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: -``` +``` yaml nodeSelector: node-role.kubernetes.io/controlplane: 'true' ``` ::: -10. Install chart and confirm daemonset `aws-cloud-controller-manager` deploys successfully with the following command: +10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` deploys successfully: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager @@ -422,56 +420,55 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 - To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. -**Important** -- The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. +:::note Important -- Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. - Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). 
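If the default values don't fit your setup, both `kube-controller-manager` and the cloud controller manager accept a `--leader-migration-config` flag that points to a `LeaderMigrationConfiguration` file. The sketch below is illustrative only and is based on the upstream Kubernetes example; the exact `apiVersion` and controller names depend on your Kubernetes version, so confirm them against the Kubernetes documentation linked above before using it:

```yaml
# Illustrative LeaderMigrationConfiguration sketch. Verify the apiVersion for
# your Kubernetes release; v1 applies to Kubernetes v1.24 and later.
apiVersion: controllermanager.config.k8s.io/v1
kind: LeaderMigrationConfiguration
leaderName: cloud-provider-extraction-migration
controllerLeaders:
  # The wildcard component lets either controller manager acquire the lock
  # for these controllers during the migration window.
  - name: route
    component: "*"
  - name: service
    component: "*"
  - name: cloud-node-lifecycle
    component: "*"
```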
+The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. +Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). -1. Update the cluster config to enable leader migration in `cluster.yml` +::: -``` +1. Update the cluster config to enable leader migration in `cluster.yml`: + +```yaml services: kube-controller: extra_args: enable-leader-migration: "true" ``` -Note that the cloud provider is still `aws` at this step. +Note that the cloud provider is still `aws` at this step: -``` +```yaml cloud_provider: name: aws ``` -2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. +2. Cordon the control plane nodes, so that the AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. -``` +```shell kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. -To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): +3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): -``` +```yaml - '--enable-leader-migration=true' ``` 4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. -5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. +5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not providing custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: +Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. 
You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: -``` +```yaml rancher_kubernetes_engine_config: cloud_provider: name: external-aws @@ -479,7 +476,8 @@ rancher_kubernetes_engine_config: ``` **Remove** `enable-leader-migration` from: -``` + +```yaml services: kube-controller: extra_args: @@ -492,7 +490,7 @@ services: 8. (Optional) After the upgrade, leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: -``` +```shell - --enable-leader-migration=true ``` @@ -505,7 +503,8 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ### Using Out-of-tree AWS Cloud Provider for RKE2 1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. -2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`, however the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: ```yaml spec: @@ -514,13 +513,10 @@ spec: cloud-provider-name: aws ``` -:::note +This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: -This option will be passed to the configuration of the various kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: -::: - -- Etcd +**Override on Etcd:** ```yaml spec: @@ -537,7 +533,7 @@ spec: - 'true' ``` -- Control Plane +**Override on Control Plane:** ```yaml spec: @@ -559,7 +555,7 @@ spec: - 'true' ``` -- Worker +**Override on Worker:** ```yaml spec: @@ -576,8 +572,7 @@ spec: - 'true' ``` -2. Select `Amazon` if relying on the above mechanism to set the provider ID. - Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. +2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. 3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: @@ -607,9 +602,7 @@ spec: ### Helm Chart Installation from CLI -:::tip - -Official upstream docs can be found [here](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager). +Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. 1. Add the helm repository: @@ -618,7 +611,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file to override default `values.yaml` with the following contents: +2. 
Create a `values.yaml` file with the following contents to override the default `values.yaml`: ```yaml # values.yaml @@ -726,13 +719,13 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify the helm chart installed successfully with the following command: +Verify that the helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager ``` -4. (Optional) Verify the cloud controller manager update was successfully rolled out with the following command: +4. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager @@ -771,7 +764,7 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager - get ``` -9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane` so you must update tolerations and the nodeSelector: +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector: ```yaml tolerations: @@ -791,7 +784,7 @@ nodeSelector: :::note -There's currently [a known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: ```yaml nodeSelector: @@ -804,7 +797,7 @@ nodeSelector: ### Migrating to the Out-of-Tree AWS Cloud Provider -To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. +To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-the-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. @@ -834,13 +827,13 @@ cloud_provider: 2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: -```bash +```shell kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` 3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): -```bash +```shell - '--enable-leader-migration=true' ``` @@ -909,8 +902,9 @@ spec: 2. 
Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: -```bash +```shell kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` 3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. @@ -919,17 +913,17 @@ Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfi 3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). In step 3 of the fresh install steps, add the following container arg to the `additionalManifest` to enable leader migration: -```bash +```shell - '--enable-leader-migration=true' ``` -4. Install the chart and confirm daemonset `aws-cloud-controller-manager` successfully deploys with the following command: +4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deployed: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -5. Update the provisioning cluster to change the cloud provider and remove leader migration args from kube-controller. +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file :::note Important @@ -940,7 +934,7 @@ Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to Remove `enable-leader-migration` if you don't want it enabled in your cluster: -``` +```yaml spec: rkeConfig: machineGlobalConfig: @@ -963,6 +957,7 @@ spec: values: - 'true' ``` + :::tip You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: @@ -981,6 +976,6 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager 6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: -```bash +```shell kubectl describe nodes | grep "ProviderID" ``` From 03fa90b8e1131115640f7d77f0f4da7069761767 Mon Sep 17 00:00:00 2001 From: Jake Hyde Date: Mon, 27 Nov 2023 16:02:55 -0500 Subject: [PATCH 14/24] Fix numbering --- .../set-up-cloud-providers/amazon.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 697eba68736b..b53b9ce4fea7 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -909,9 +909,6 @@ kubectl cordon -l "node-role.kubernetes.io/controlplane=true" 3. 
To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: -``` - -3. To install the AWS cloud controller manager with leader migration enabled, follow steps 1-3 for [deploying a cloud controller manager chart](#using-the-out-of-tree-aws-cloud-provider-for-rke2). In step 3 of the fresh install steps, add the following container arg to the `additionalManifest` to enable leader migration: ```shell - '--enable-leader-migration=true' From a7c57b2aa161424f65bd2289c2c8360ec05d73d2 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Mon, 27 Nov 2023 17:41:56 -0500 Subject: [PATCH 15/24] Update docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md --- .../set-up-cloud-providers/amazon.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index b53b9ce4fea7..38330689f0b6 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -906,7 +906,7 @@ spec: kubectl cordon -l "node-role.kubernetes.io/controlplane=true" ``` -3. To install AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) +3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: From 4523b540df1432a1dc267b807a2665a2b924af9a Mon Sep 17 00:00:00 2001 From: Kinara Shah Date: Tue, 28 Nov 2023 06:06:03 -0800 Subject: [PATCH 16/24] update helm installation steps --- .../set-up-cloud-providers/amazon.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 38330689f0b6..507c51e9f420 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -335,7 +335,7 @@ clusterRoleRules: 3. 
Install the helm chart: ```shell -helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` Verify that the helm chart installed successfully: @@ -344,7 +344,13 @@ Verify that the helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. (Optional) Verify that the cloud controller manager update succeeded: +4. If present, edit daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: + +```shell +kubectl edit daemonset aws-cloud-controller-manager -n kube-system +``` + +5. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager From 9fbf725958cbd17dcd47ff1eacdea465b28eddf6 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 11:28:25 -0500 Subject: [PATCH 17/24] 2.8 versioning --- .../set-up-cloud-providers/amazon.md | 990 ++++++++++++++++-- .../migrate-from-in-tree-to-out-of-tree.md | 4 +- 2 files changed, 906 insertions(+), 88 deletions(-) diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 10700b418d4e..507c51e9f420 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -7,12 +7,12 @@ weight: 1 -When using the `Amazon` cloud provider, you can leverage the following capabilities: +When you use Amazon as a cloud provider, you can leverage the following capabilities: -- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. -- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. +- **Load Balancers:** Launch an AWS Elastic Load Balancer (ELB) when you select `Layer-4 Load Balancer` in **Port Mapping** or when you launch a `Service` with `type: LoadBalancer`. +- **Persistent Volumes**: Use AWS Elastic Block Stores (EBS) for persistent volumes. -See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider. +See the [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for more information about the Amazon cloud provider. To set up the Amazon cloud provider, @@ -21,7 +21,11 @@ To set up the Amazon cloud provider, :::note Important: -Starting with Kubernetes 1.23, you have to deactivate the `CSIMigrationAWS` feature gate in order to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. 
In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. + +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. + +Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. ::: @@ -40,71 +44,71 @@ IAM Policy for nodes with the `controlplane` role: ```json { -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + 
"ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] } ``` @@ -112,24 +116,24 @@ IAM policy for nodes with the `etcd` or `worker` role: ```json { -"Version": "2012-10-17", -"Statement": [ + "Version": "2012-10-17", + "Statement": [ { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" } -] + ] } ``` @@ -161,6 +165,820 @@ If you share resources between clusters, you can change the tag to: The string value, ``, is the Kubernetes cluster's ID. +:::note + +Do not tag a resource with multiple owned or shared tags. + +::: + ### Using Amazon Elastic Container Registry (ECR) The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. 
Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. + +### Using the Out-of-Tree AWS Cloud Provider for RKE1 + +1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. + +:::note + +If you use IP-based naming, the nodes must be named after the instance followed by the regional domain name (`ip-xxx-xxx-xxx-xxx.ec2..internal`). If you have a custom domain name set in the DHCP options, you must set `--hostname-override` on `kube-proxy` and `kubelet` to match this naming convention. + +::: + +To meet node naming conventions, Rancher allows setting `useInstanceMetadataHostname` when the `External Amazon` cloud provider is selected. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: + +```yaml +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true +``` + +You must not enable `useInstanceMetadataHostname` when setting custom values for `hostname-override` for custom clusters. When you create a [custom cluster](../../../../pages-for-subheaders/use-existing-nodes.md), add [`--node-name`](../../../../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) to the `docker run` node registration command to set `hostname-override` — for example, `"$(hostname -f)"`. This can be done manually or by using **Show Advanced Options** in the Rancher UI to add **Node Name**. + +2. Select the cloud provider. + +Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. + +::: note + +You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`. + +::: + +```yaml +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true/false +``` + +Existing clusters that use an **External** cloud provider will set `--cloud-provider=external` for Kubernetes components but won't set the node name. + +3. Install the AWS cloud controller manager after the cluster finishes provisioning. Note that the cluster isn't successfully provisioned and nodes are still in an `uninitialized` state until you deploy the cloud controller manager. This can be done manually, or via [Helm charts in UI](#helm-chart-installation-from-ui). + +Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). + +### Helm Chart Installation from CLI + +Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. + +1. Add the helm repository: + +```shell +helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws +helm repo update +``` + +2. 
Create a `values.yaml` file with the following contents, to override the default `values.yaml`: + +```yaml +# values.yaml +hostNetworking: true +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +args: + - --configure-cloud-routes=false + - --use-service-account-credentials=true + - --v=2 + - --cloud-provider=aws +clusterRoleRules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +``` + +3. Install the helm chart: + +```shell +helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +``` + +Verify that the helm chart installed successfully: + +```shell +helm status -n kube-system aws-cloud-controller-manager +``` + +4. If present, edit daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: + +```shell +kubectl edit daemonset aws-cloud-controller-manager -n kube-system +``` + +5. (Optional) Verify that the cloud controller manager update succeeded: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Helm Chart Installation from UI + +1. Click **☰**, then select the name of the cluster from the left navigation. + +2. Select **Apps** > **Repositories**. + +3. Click the **Create** button. + +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. + +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. + +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. + +7. Add the following container arguments: + +```yaml + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` + +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: + +```yaml + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get +``` + +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. 
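Before adjusting them, you can optionally confirm how your nodes are currently tainted and labeled. This is only a sketch using standard `kubectl` output options; the taint and label keys shown are the RKE defaults referenced in this section and may differ in your environment:

```shell
# List every node together with the keys of any taints set on it
kubectl get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*].key'

# Show node labels and filter for the controlplane role label
kubectl get nodes --show-labels | grep controlplane
```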
Update tolerations and the nodeSelector: + +```yaml +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane + +``` + +```yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +:::note + +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: + +``` yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +::: + +10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` deploys successfully: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 + +To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + +If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +:::note Important + +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. + +Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). + +::: + +1. Update the cluster config to enable leader migration in `cluster.yml`: + +```yaml +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +Note that the cloud provider is still `aws` at this step: + +```yaml +cloud_provider: + name: aws +``` + +2. Cordon the control plane nodes, so that the AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. 
To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): + +```yaml +- '--enable-leader-migration=true' +``` + +4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. + +5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller. + +Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: + +```yaml +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true/false +``` + +**Remove** `enable-leader-migration` from: + +```yaml +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. + +7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. + +8. (Optional) After the upgrade, leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: + +```shell +- --enable-leader-migration=true +``` + +Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Using Out-of-tree AWS Cloud Provider for RKE2 + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. + +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. 
However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: + + +**Override on Etcd:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/etcd-role + operator: In + values: + - 'true' +``` + +**Override on Control Plane:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +**Override on Worker:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/worker-role + operator: In + values: + - 'true' +``` + +2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + +3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + +### Helm Chart Installation from CLI + +Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. + +1. Add the helm repository: + +```shell +helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws +helm repo update +``` + +2. 
Create a `values.yaml` file with the following contents to override the default `values.yaml`: + +```yaml +# values.yaml +hostNetworking: true +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +args: + - --configure-cloud-routes=false + - --use-service-account-credentials=true + - --v=2 + - --cloud-provider=aws +clusterRoleRules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - list + - patch + - update + - watch + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +``` + +3. Install the helm chart: + +```shell +helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +``` + +Verify that the helm chart installed successfully: + +```shell +helm status -n kube-system aws-cloud-controller-manager +``` + +4. (Optional) Verify that the cloud controller manager update succeeded: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +### Helm Chart Installation from UI + +1. Click **☰**, then select the name of the cluster from the left navigation. + +2. Select **Apps** > **Repositories**. + +3. Click the **Create** button. + +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. + +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. + +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. + +7. Add the following container arguments: + +```yaml + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` + +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup. + +```yaml + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get +``` + +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector: + +```yaml +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane + +``` + +```yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +:::note + +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. 
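The workaround is to continue with the install and then edit the DaemonSet by hand. A sketch of the edit command, assuming the default DaemonSet name and the `kube-system` target namespace selected earlier, is:

```shell
# Opens the DaemonSet in an editor so the nodeSelector shown below can be added by hand
kubectl edit daemonset aws-cloud-controller-manager -n kube-system
```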
Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: + +```yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' +``` + +::: + +10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). + +### Migrating to the Out-of-Tree AWS Cloud Provider + +To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-the-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + +If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +:::note Important: +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). +::: + +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 + +1. Update the cluster config to enable leader migration in `cluster.yml` + +```yaml +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +Note that the cloud provider is still `aws` at this step. + +```yaml +cloud_provider: + name: aws +``` + +2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): + +```shell +- '--enable-leader-migration=true' +``` + +4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. 
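One way to check this, assuming the default release name and the `kube-system` namespace used in the install steps above, is:

```shell
# The release should show as deployed even though its pods are still Pending
helm status -n kube-system aws-cloud-controller-manager

# The controller pods should be listed as Pending while the control plane nodes are cordoned
kubectl get pods -n kube-system | grep aws-cloud-controller-manager
```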
After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule `aws-controller-manager` pods. + +5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. + +Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` queries the EC2 metadata service and sets `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: + +```yaml +rancher_kubernetes_engine_config: + cloud_provider: + name: external-aws + useInstanceMetadataHostname: true/false +``` + +Remove `enable-leader-migration` if you don't want it enabled in your cluster: + +```yaml +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +:::tip +You can also disable leader migration after step 7. Upgrade the chart and remove the following section from the container arguments: + +```yaml +- --enable-leader-migration=true +``` + +::: + +6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. + +7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. + +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 + +1. Update the cluster config to enable leader migration: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +Note that the cloud provider is still `aws` at this step: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +2. Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) +From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. +Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: + +```shell +- '--enable-leader-migration=true' +``` + +4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deployed: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. +If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file + +:::note Important + +Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. 
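Before removing it, you can check whether every node already has a provider ID set. This is a generic `kubectl` query, not specific to this chart:

```shell
# Each node should report a provider ID of the form aws:///<availability-zone>/<instance-id>
kubectl get nodes -o custom-columns='NAME:.metadata.name,PROVIDER-ID:.spec.providerID'
```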
+ +::: + +Remove `enable-leader-migration` if you don't want it enabled in your cluster: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: external +``` + +Remove `enable-leader-migration` from: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +:::tip +You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. +Upgrade the chart and remove the following section from the container arguments: + +```yaml +- --enable-leader-migration=true +``` + +Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +::: + +6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: + +```shell +kubectl describe nodes | grep "ProviderID" +``` diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md index d302213118ac..3eb227d79ec9 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md @@ -64,7 +64,7 @@ Once all nodes are tainted by the running the script, launch the Helm vSphere CP 1. Click **☰ > Cluster Management**. 1. Go to the cluster where the vSphere CPI chart will be installed and click **Explore**. 1. Click **Apps > Charts**. -1. Click **vSphere CPI**.. +1. Click **vSphere CPI**. 1. Click **Install**. 1. Fill out the required vCenter details and click **Install**. @@ -81,7 +81,7 @@ kubectl describe nodes | grep "ProviderID" 1. Click **☰ > Cluster Management**. 1. Go to the cluster where the vSphere CSI chart will be installed and click **Explore**. 1. Click **Apps > Charts**. -1. Click **vSphere CSI**.. +1. Click **vSphere CSI**. 1. Click **Install**. 1. Fill out the required vCenter details and click **Install**. 1. Check **Customize Helm options before install** and click **Next**. From b4371cf00c89a0c6301c5dfc01b2823603bfa4ac Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 11:41:52 -0500 Subject: [PATCH 18/24] rm 'new in 2.7' from 2.8 --- .../version-2.8/integrations-in-rancher/rancher-extensions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md b/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md index 0a86dad7d2da..29a1b45727f9 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md @@ -2,7 +2,7 @@ title: Rancher Extensions --- -New in Rancher v2.7.0, Rancher introduces **extensions**. Extensions allow users, developers, partners, and customers to extend and enhance the Rancher UI. 
In addition, users can make changes and create enhancements to their UI functionality independent of Rancher releases. Extensions will enable users to build on top of Rancher to better tailor it to their respective environments. Note that users will also have the ability to update to new versions as well as roll back to a previous version. +Extensions allow users, developers, partners, and customers to extend and enhance the Rancher UI. In addition, users can make changes and create enhancements to their UI functionality independent of Rancher releases. Extensions will enable users to build on top of Rancher to better tailor it to their respective environments. Note that users will also have the ability to update to new versions as well as roll back to a previous version. Extensions are Helm charts that can only be installed once into a cluster; therefore, these charts have been simplified and separated from the general Helm charts listed under **Apps**. From 39555209702584fb10e0d4839895ce61322b336e Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Tue, 28 Nov 2023 12:22:22 -0500 Subject: [PATCH 19/24] Update versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md --- .../set-up-cloud-providers/amazon.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 507c51e9f420..178a7e3c7bbf 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -508,7 +508,7 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ### Using Out-of-tree AWS Cloud Provider for RKE2 -1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. +1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. 2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: From 5f3227b25194c2b397856732dd8ca5527a67f4a2 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 12:23:40 -0500 Subject: [PATCH 20/24] revert -- change intended for other branch --- .../version-2.8/integrations-in-rancher/rancher-extensions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md b/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md index 29a1b45727f9..0a86dad7d2da 100644 --- a/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md +++ b/versioned_docs/version-2.8/integrations-in-rancher/rancher-extensions.md @@ -2,7 +2,7 @@ title: Rancher Extensions --- -Extensions allow users, developers, partners, and customers to extend and enhance the Rancher UI. 
In addition, users can make changes and create enhancements to their UI functionality independent of Rancher releases. Extensions will enable users to build on top of Rancher to better tailor it to their respective environments. Note that users will also have the ability to update to new versions as well as roll back to a previous version. +New in Rancher v2.7.0, Rancher introduces **extensions**. Extensions allow users, developers, partners, and customers to extend and enhance the Rancher UI. In addition, users can make changes and create enhancements to their UI functionality independent of Rancher releases. Extensions will enable users to build on top of Rancher to better tailor it to their respective environments. Note that users will also have the ability to update to new versions as well as roll back to a previous version. Extensions are Helm charts that can only be installed once into a cluster; therefore, these charts have been simplified and separated from the general Helm charts listed under **Apps**. From e5152d565e712bd49092e7c479dba11b88cdb475 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 15:25:58 -0500 Subject: [PATCH 21/24] typo fixes --- .../set-up-cloud-providers/amazon.md | 26 ++++++++--------- .../set-up-cloud-providers/amazon.md | 28 +++++++++---------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 507c51e9f420..f18a621bfad1 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,9 +21,9 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. -You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. 
You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. @@ -175,9 +175,9 @@ Do not tag a resource with multiple owned or shared tags. The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. -### Using the Out-of-Tree AWS Cloud Provider for RKE1 +### Using the Out-of-Tree AWS Cloud Provider for RKE -1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. +1. [Node name conventions and other prerequisites ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. :::note @@ -221,9 +221,9 @@ Refer to the offical AWS upstream documentation for the [cloud controller manage ### Helm Chart Installation from CLI -Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. +Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. -1. Add the helm repository: +1. Add the Helm repository: ```shell helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws @@ -338,7 +338,7 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify that the helm chart installed successfully: +Verify that the Helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager @@ -409,7 +409,7 @@ nodeSelector: :::note -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: ``` yaml nodeSelector: @@ -424,7 +424,7 @@ nodeSelector: kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. 
@@ -434,7 +434,7 @@ If your setup can't tolerate any control plane downtime, you must enable leader :::note Important -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). @@ -725,7 +725,7 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify that the helm chart installed successfully: +Verify that the Helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager @@ -790,7 +790,7 @@ nodeSelector: :::note -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`: ```yaml nodeSelector: @@ -813,7 +813,7 @@ If your setup can't tolerate any control plane downtime, you must enable leader The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). ::: -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE 1. 
Update the cluster config to enable leader migration in `cluster.yml` diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index 178a7e3c7bbf..f18a621bfad1 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,9 +21,9 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE1](#using-the-out-of-tree-aws-cloud-provider-for-rke1) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. -You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke1) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. @@ -175,9 +175,9 @@ Do not tag a resource with multiple owned or shared tags. The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. -### Using the Out-of-Tree AWS Cloud Provider for RKE1 +### Using the Out-of-Tree AWS Cloud Provider for RKE -1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. +1. 
[Node name conventions and other prerequisites ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. :::note @@ -221,9 +221,9 @@ Refer to the offical AWS upstream documentation for the [cloud controller manage ### Helm Chart Installation from CLI -Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. +Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. -1. Add the helm repository: +1. Add the Helm repository: ```shell helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws @@ -338,7 +338,7 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify that the helm chart installed successfully: +Verify that the Helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager @@ -409,7 +409,7 @@ nodeSelector: :::note -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: ``` yaml nodeSelector: @@ -424,7 +424,7 @@ nodeSelector: kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 +### Migrating to the Out-of-Tree AWS Cloud Provider for RKE To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. @@ -434,7 +434,7 @@ If your setup can't tolerate any control plane downtime, you must enable leader :::note Important -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). 
If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). @@ -508,7 +508,7 @@ kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ### Using Out-of-tree AWS Cloud Provider for RKE2 -1. [Node name conventions and other prerequisities ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. 2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: @@ -725,7 +725,7 @@ clusterRoleRules: helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` -Verify that the helm chart installed successfully: +Verify that the Helm chart installed successfully: ```shell helm status -n kube-system aws-cloud-controller-manager @@ -790,7 +790,7 @@ nodeSelector: :::note -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the daemonset manually to set the `nodeSelector`: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where nodeSelector can't be updated from the Rancher UI. Continue installing the chart and then edit the Daemonset manually to set the `nodeSelector`: ```yaml nodeSelector: @@ -813,7 +813,7 @@ If your setup can't tolerate any control plane downtime, you must enable leader The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). ::: -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE1 +#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE 1. 
Update the cluster config to enable leader migration in `cluster.yml` From 67ef61ab22cc09787e0bdbabeb56640f267d9990 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 16:03:56 -0500 Subject: [PATCH 22/24] fix headings, fix casing --- .../set-up-cloud-providers/amazon.md | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index f18a621bfad1..b8d72c716c8f 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,7 +21,7 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. @@ -219,7 +219,7 @@ Existing clusters that use an **External** cloud provider will set `--cloud-prov Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). -### Helm Chart Installation from CLI +#### Helm Chart Installation from CLI Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. @@ -332,7 +332,7 @@ clusterRoleRules: - create ``` -3. Install the helm chart: +3. Install the Helm chart: ```shell helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml @@ -344,7 +344,7 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. If present, edit daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: +4. 
If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: ```shell kubectl edit daemonset aws-cloud-controller-manager -n kube-system @@ -356,7 +356,7 @@ kubectl edit daemonset aws-cloud-controller-manager -n kube-system kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Helm Chart Installation from UI +#### Helm Chart Installation from UI 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -418,7 +418,7 @@ nodeSelector: ::: -10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` deploys successfully: +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager @@ -580,7 +580,7 @@ spec: 2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. -3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: +3. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: ```yaml spec: @@ -606,11 +606,11 @@ spec: - --cloud-provider=aws ``` -### Helm Chart Installation from CLI +#### Helm Chart Installation from CLI -Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. +Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. -1. Add the helm repository: +1. Add the Helm repository: ```shell helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws @@ -719,7 +719,7 @@ clusterRoleRules: - create ``` -3. Install the helm chart: +3. Install the Helm chart: ```shell helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml @@ -737,7 +737,7 @@ helm status -n kube-system aws-cloud-controller-manager kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Helm Chart Installation from UI +#### Helm Chart Installation from UI 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -799,7 +799,7 @@ nodeSelector: ::: -10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). ### Migrating to the Out-of-Tree AWS Cloud Provider @@ -920,7 +920,7 @@ Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfi - '--enable-leader-migration=true' ``` -4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deployed: +4. 
Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager From 71bc47270b5240c10ef0cea51787de696ce546c0 Mon Sep 17 00:00:00 2001 From: martyav Date: Tue, 28 Nov 2023 16:04:34 -0500 Subject: [PATCH 23/24] apply prev commit to 2.8 --- .../set-up-cloud-providers/amazon.md | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index f18a621bfad1..b8d72c716c8f 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -21,7 +21,7 @@ To set up the Amazon cloud provider, :::note Important: -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated, and the amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. @@ -219,7 +219,7 @@ Existing clusters that use an **External** cloud provider will set `--cloud-prov Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). -### Helm Chart Installation from CLI +#### Helm Chart Installation from CLI Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. @@ -332,7 +332,7 @@ clusterRoleRules: - create ``` -3. Install the helm chart: +3. Install the Helm chart: ```shell helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml @@ -344,7 +344,7 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. If present, edit daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: +4. 
If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: ```shell kubectl edit daemonset aws-cloud-controller-manager -n kube-system @@ -356,7 +356,7 @@ kubectl edit daemonset aws-cloud-controller-manager -n kube-system kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Helm Chart Installation from UI +#### Helm Chart Installation from UI 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -418,7 +418,7 @@ nodeSelector: ::: -10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` deploys successfully: +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager @@ -580,7 +580,7 @@ spec: 2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. -3. Specify the `aws-cloud-controller-manager` helm chart as an additional manifest to install: +3. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: ```yaml spec: @@ -606,11 +606,11 @@ spec: - --cloud-provider=aws ``` -### Helm Chart Installation from CLI +#### Helm Chart Installation from CLI -Official upstream docs for [helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. +Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. -1. Add the helm repository: +1. Add the Helm repository: ```shell helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws @@ -719,7 +719,7 @@ clusterRoleRules: - create ``` -3. Install the helm chart: +3. Install the Helm chart: ```shell helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml @@ -737,7 +737,7 @@ helm status -n kube-system aws-cloud-controller-manager kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Helm Chart Installation from UI +#### Helm Chart Installation from UI 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -799,7 +799,7 @@ nodeSelector: ::: -10. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). ### Migrating to the Out-of-Tree AWS Cloud Provider @@ -920,7 +920,7 @@ Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfi - '--enable-leader-migration=true' ``` -4. Install the chart and confirm that the daemonset `aws-cloud-controller-manager` successfully deployed: +4. 
Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager From 2210cef390134964080a57abfdea6b60075b8ee3 Mon Sep 17 00:00:00 2001 From: Marty Hernandez Avedon Date: Fri, 1 Dec 2023 17:44:04 -0500 Subject: [PATCH 24/24] Reorganizing AWS migration pages (#1015) This partially addresses https://github.com/rancher/rancher-docs/issues/991 (rename file `migrating-from-in-tree-to-out-of-tree` to shorter and reference vsphere) and also fixes problems on the open PR: Duplicate sections (removed), difficulty navigating the file (split into two), sections with similar titles (opting for tabs instead of headings). I created this on its own working branch because moving around large blocks of text was unwieldly and I didn't want to mess up my local version of 763-document-aws-out-of-tree-v2prov. The last tab block (Helm Chart Installation through UI) contains contain that seems to be entirely the same for RKE and RKE2. --- .../set-up-cloud-providers/amazon.md | 606 ++++++------------ .../migrate-to-out-of-tree-amazon.md | 196 ++++++ ...e.md => migrate-to-out-of-tree-vsphere.md} | 2 +- docusaurus.config.js | 5 +- sidebars.js | 3 +- .../set-up-cloud-providers/amazon.md | 606 ++++++------------ .../migrate-to-out-of-tree-amazon.md | 196 ++++++ ...e.md => migrate-to-out-of-tree-vsphere.md} | 2 +- versioned_sidebars/version-2.8-sidebars.json | 3 +- 9 files changed, 768 insertions(+), 851 deletions(-) create mode 100644 docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md rename docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/{migrate-from-in-tree-to-out-of-tree.md => migrate-to-out-of-tree-vsphere.md} (98%) create mode 100644 versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md rename versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/{migrate-from-in-tree-to-out-of-tree.md => migrate-to-out-of-tree-vsphere.md} (98%) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index b8d72c716c8f..cf83a023df64 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -7,6 +7,16 @@ weight: 1 +:::note Important: + +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. + +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](./migrate-to-out-of-tree-amazon.md) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. 
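If you're not sure whether a downstream cluster still relies on the in-tree provider, one quick check is to look at the flags on the kube-controller-manager on a control plane node. This is a sketch and assumes an RKE1 node, where the component runs as a Docker container named `kube-controller-manager`:

```shell
# An in-tree setup shows --cloud-provider=aws; an out-of-tree setup shows
# --cloud-provider=external (or no cloud-provider flag at all).
docker inspect kube-controller-manager | grep cloud-provider
```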
+ +Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. + +::: + When you use Amazon as a cloud provider, you can leverage the following capabilities: - **Load Balancers:** Launch an AWS Elastic Load Balancer (ELB) when you select `Layer-4 Load Balancer` in **Port Mapping** or when you launch a `Service` with `type: LoadBalancer`. @@ -19,16 +29,6 @@ To set up the Amazon cloud provider, 1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) 2. [Configure the ClusterID](#2-configure-the-clusterid) -:::note Important: - -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. - -You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. - -Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. - -::: - ### 1. Create an IAM Role and attach to the instances All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: @@ -175,7 +175,112 @@ Do not tag a resource with multiple owned or shared tags. The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. -### Using the Out-of-Tree AWS Cloud Provider for RKE +### Using the Out-of-Tree AWS Cloud Provider + + + + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for the cloud provider to find the instance correctly. + +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. 
However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: + + +**Override on Etcd:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/etcd-role + operator: In + values: + - 'true' +``` + +**Override on Control Plane:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +**Override on Worker:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/worker-role + operator: In + values: + - 'true' +``` + +2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + +3. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + + + + 1. [Node name conventions and other prerequisites ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. Rancher provisioned clusters don't support configuring `providerID`. @@ -200,7 +305,7 @@ You must not enable `useInstanceMetadataHostname` when setting custom values for Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. -::: note +:::note You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`. @@ -219,7 +324,13 @@ Existing clusters that use an **External** cloud provider will set `--cloud-prov Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). -#### Helm Chart Installation from CLI + + + +### Helm Chart Installation from CLI + + + Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. 
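If you deployed the chart through the `additionalManifest` HelmChart resource shown earlier rather than the CLI steps below, you can confirm that RKE2's Helm controller picked it up before going further. This is a sketch; the `HelmChart` type only exists on RKE2/K3s clusters, and the DaemonSet name matches the one used throughout this page:

```shell
# The HelmChart resource created from spec.rkeConfig.additionalManifest
kubectl -n kube-system get helmchart aws-cloud-controller-manager

# The DaemonSet the chart ultimately deploys
kubectl -n kube-system get daemonset aws-cloud-controller-manager
```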
@@ -230,7 +341,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`: +2. Create a `values.yaml` file with the following contents to override the default `values.yaml`: ```yaml # values.yaml @@ -335,7 +446,7 @@ clusterRoleRules: 3. Install the Helm chart: ```shell -helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` Verify that the Helm chart installed successfully: @@ -344,269 +455,15 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: - -```shell -kubectl edit daemonset aws-cloud-controller-manager -n kube-system -``` - -5. (Optional) Verify that the cloud controller manager update succeeded: - -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager -``` - -#### Helm Chart Installation from UI - -1. Click **☰**, then select the name of the cluster from the left navigation. - -2. Select **Apps** > **Repositories**. - -3. Click the **Create** button. - -4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. - -5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. - -6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. - -7. Add the following container arguments: - -```yaml - - '--use-service-account-credentials=true' - - '--configure-cloud-routes=false' -``` - -8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: - -```yaml - - apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get -``` - -9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector: - -```yaml -tolerations: - - effect: NoSchedule - key: node.cloudprovider.kubernetes.io/uninitialized - value: 'true' - - effect: NoSchedule - value: 'true' - key: node-role.kubernetes.io/controlplane - -``` - -```yaml -nodeSelector: - node-role.kubernetes.io/controlplane: 'true' -``` - -:::note - -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: - -``` yaml -nodeSelector: - node-role.kubernetes.io/controlplane: 'true' -``` - -::: - -10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: - -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager -``` - -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE - -To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. 
- -If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. - -If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. - -:::note Important - -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. - -Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). - -::: - -1. Update the cluster config to enable leader migration in `cluster.yml`: - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -Note that the cloud provider is still `aws` at this step: - -```yaml -cloud_provider: - name: aws -``` - -2. Cordon the control plane nodes, so that the AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. - -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" -``` - -3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): - -```yaml -- '--enable-leader-migration=true' -``` - -4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. - -5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller. - -Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not you don't provide a custom node name via `--node-name`. 
Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: - -```yaml -rancher_kubernetes_engine_config: - cloud_provider: - name: external-aws - useInstanceMetadataHostname: true/false -``` - -**Remove** `enable-leader-migration` from: - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. - -7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. - -8. (Optional) After the upgrade, leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: - -```shell -- --enable-leader-migration=true -``` - -Verify the cloud controller manager update was successfully rolled out with the following command: +4. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Using Out-of-tree AWS Cloud Provider for RKE2 + -1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. - -2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: - -```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: aws -``` - -This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: - - -**Override on Etcd:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/etcd-role - operator: In - values: - - 'true' -``` - -**Override on Control Plane:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - disable-cloud-controller: true - kube-apiserver-arg: - - cloud-provider=external - kube-controller-manager-arg: - - cloud-provider=external - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' -``` - -**Override on Worker:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/worker-role - operator: In - values: - - 'true' -``` - -2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. - -3. 
Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: - -```yaml -spec: - rkeConfig: - additionalManifest: |- - apiVersion: helm.cattle.io/v1 - kind: HelmChart - metadata: - name: aws-cloud-controller-manager - namespace: kube-system - spec: - chart: aws-cloud-controller-manager - repo: https://kubernetes.github.io/cloud-provider-aws - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - hostNetworking: true - nodeSelector: - node-role.kubernetes.io/control-plane: "true" - args: - - --configure-cloud-routes=false - - --v=5 - - --cloud-provider=aws -``` - -#### Helm Chart Installation from CLI + Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. @@ -617,7 +474,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file with the following contents to override the default `values.yaml`: +2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`: ```yaml # values.yaml @@ -722,7 +579,7 @@ clusterRoleRules: 3. Install the Helm chart: ```shell -helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` Verify that the Helm chart installed successfully: @@ -731,13 +588,25 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. (Optional) Verify that the cloud controller manager update succeeded: +4. If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: + +```shell +kubectl edit daemonset aws-cloud-controller-manager -n kube-system +``` + +5. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -#### Helm Chart Installation from UI + + + +### Helm Chart Installation from UI + + + 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -801,184 +670,75 @@ nodeSelector: 10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). -### Migrating to the Out-of-Tree AWS Cloud Provider - -To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. - -If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-the-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + -If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. 
Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + -:::note Important: -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). -::: - -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE - -1. Update the cluster config to enable leader migration in `cluster.yml` - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -Note that the cloud provider is still `aws` at this step. - -```yaml -cloud_provider: - name: aws -``` - -2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: +1. Click **☰**, then select the name of the cluster from the left navigation. -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" -``` +2. Select **Apps** > **Repositories**. -3. To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): +3. Click the **Create** button. -```shell -- '--enable-leader-migration=true' -``` +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. -4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule `aws-controller-manager` pods. +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. -5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` queries the EC2 metadata service and sets `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: +7. 
Add the following container arguments: ```yaml -rancher_kubernetes_engine_config: - cloud_provider: - name: external-aws - useInstanceMetadataHostname: true/false -``` + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` -Remove `enable-leader-migration` if you don't want it enabled in your cluster: +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: ```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get ``` -:::tip -You can also disable leader migration after step 7. Upgrade the chart and remove the following section from the container arguments: +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector: ```yaml -- --enable-leader-migration=true -``` - -::: - -6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. - -7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. - -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 - -1. Update the cluster config to enable leader migration: +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kube-controller-manager-arg: - - enable-leader-migration - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' ``` -Note that the cloud provider is still `aws` at this step: - ```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: aws -``` - -2. Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: - -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' ``` -3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) -From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. -Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: - -```shell -- '--enable-leader-migration=true' -``` +:::note -4. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' ``` -5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. 
-If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file - -:::note Important - -Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. - ::: -Remove `enable-leader-migration` if you don't want it enabled in your cluster: - -```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: external -``` - -Remove `enable-leader-migration` from: - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kube-controller-manager-arg: - - enable-leader-migration - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' -``` - -:::tip -You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. -Upgrade the chart and remove the following section from the container arguments: - -```yaml -- --enable-leader-migration=true -``` - -Verify the cloud controller manager update was successfully rolled out with the following command: +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -::: - -6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: - -```shell -kubectl describe nodes | grep "ProviderID" -``` + + diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md new file mode 100644 index 000000000000..c65bef6ec155 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md @@ -0,0 +1,196 @@ +--- +title: Migrating Amazon In-tree to Out-of-tree +--- + + + + + +Kubernetes is moving away from maintaining cloud providers in-tree. In Kubernetes 1.27 and later, the in-tree cloud providers have been removed. + +You can migrate from an in-tree to an out-of-tree AWS cloud provider on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. + +To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +If it's acceptable to have some downtime, you can [switch to an external cloud provider](./amazon.md#using-the-out-of-tree-aws-cloud-provider-for-rke), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + +If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. 
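Whichever path you choose, it can help to first take stock of the resources that depend on the cloud provider integration, since these are what a stalled hand-over would affect. A minimal sketch, not part of the upstream procedure:

```shell
# Services that AWS provisions load balancers for
kubectl get services --all-namespaces | grep LoadBalancer

# Nodes and the provider IDs the cloud provider has set so far
kubectl get nodes -o custom-columns=NAME:.metadata.name,PROVIDERID:.spec.providerID
```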
+ +:::note Important: +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). +::: + + + + +1. Update the cluster config to enable leader migration: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +Note that the cloud provider is still `aws` at this step: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +2. Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](./amazon.md#using-out-of-tree-aws-cloud-provider-for-rke2) +From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. +Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: + +```shell +- '--enable-leader-migration=true' +``` + +4. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. +If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file + +:::note Important + +Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. + +::: + +Remove `enable-leader-migration` if you don't want it enabled in your cluster: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: external +``` + +Remove `enable-leader-migration` from: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +:::tip +You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. 
+Upgrade the chart and remove the following section from the container arguments:
+
+```yaml
+- --enable-leader-migration=true
+```
+:::
+
+Verify the cloud controller manager update was successfully rolled out with the following command:
+
+```shell
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```
+
+6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID:
+
+```shell
+kubectl describe nodes | grep "ProviderID"
+```
+
+
+
+
+1. Update the cluster config to enable leader migration in `cluster.yml`:
+
+```yaml
+services:
+  kube-controller:
+    extra_args:
+      enable-leader-migration: "true"
+```
+
+Note that the cloud provider is still `aws` at this step:
+
+```yaml
+cloud_provider:
+  name: aws
+```
+
+2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider:
+
+```shell
+kubectl cordon -l "node-role.kubernetes.io/controlplane=true"
+```
+
+3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](./amazon.md#helm-chart-installation-from-ui-for-rke):
+
+```yaml
+- '--enable-leader-migration=true'
+```
+
+4. Confirm that the chart is installed but that the new pods aren't running yet, because the control plane nodes are still cordoned. After you update the cluster in the next step, RKE upgrades and uncordons each node, and the `aws-cloud-controller-manager` pods are scheduled.
+
+5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller.
+
+   Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters, and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`:
+
+```yaml
+rancher_kubernetes_engine_config:
+  cloud_provider:
+    name: external-aws
+    useInstanceMetadataHostname: true/false
+```
+
+   Remove `enable-leader-migration` if you don't want it enabled in your cluster:
+
+   ```yaml
+   services:
+     kube-controller:
+       extra_args:
+         enable-leader-migration: "true"
+   ```
+
+:::tip
+You can also disable leader migration after you finish the migration. Upgrade the chart and remove the following section from the container arguments:
+
+```yaml
+- --enable-leader-migration=true
+```
+:::
+
+6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well.
+
+7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running; see the verification command below.
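+
+To verify, you can reuse the same rollout check shown in the RKE2 steps above. This assumes the chart was installed with its default Daemonset name into `kube-system`:
+
+```shell
+# Wait until the cloud controller manager Daemonset finishes rolling out
+kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager
+```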
+ + + + diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md similarity index 98% rename from docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md index 3eb227d79ec9..a3bc9b89d2db 100644 --- a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md @@ -1,5 +1,5 @@ --- -title: Migrating vSphere In-tree Volumes to Out-of-tree +title: Migrating vSphere In-tree to Out-of-tree --- diff --git a/docusaurus.config.js b/docusaurus.config.js index 6db813113de7..899412309072 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -582,9 +582,12 @@ module.exports = { from: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere' }, { - to: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree', + to: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere', from: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree' }, + { to: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere', + from: '/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree' + }, { to: '/how-to-guides/new-user-guides/add-users-to-projects', from: '/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects' diff --git a/sidebars.js b/sidebars.js index ce3687e759b7..9d920f8d81c8 100644 --- a/sidebars.js +++ b/sidebars.js @@ -493,11 +493,12 @@ const sidebars = { }, items: [ "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon", "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure", "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/google-compute-engine", "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere", "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere", - "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere", ] }, "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters", diff --git 
a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md index b8d72c716c8f..cf83a023df64 100644 --- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon.md @@ -7,6 +7,16 @@ weight: 1 +:::note Important: + +In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. + +You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](./migrate-to-out-of-tree-amazon.md) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. + +Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. + +::: + When you use Amazon as a cloud provider, you can leverage the following capabilities: - **Load Balancers:** Launch an AWS Elastic Load Balancer (ELB) when you select `Layer-4 Load Balancer` in **Port Mapping** or when you launch a `Service` with `type: LoadBalancer`. @@ -19,16 +29,6 @@ To set up the Amazon cloud provider, 1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) 2. [Configure the ClusterID](#2-configure-the-clusterid) -:::note Important: - -In Kubernetes 1.27 and later, you must use an out-of-tree AWS cloud provider. In-tree cloud providers have been deprecated. The Amazon cloud provider has been removed completely, and won't work after an upgrade to Kubernetes 1.27. The steps listed below are still required to set up an Amazon cloud provider. You can [set up an out-of-tree cloud provider for RKE](#using-the-out-of-tree-aws-cloud-provider-for-rke) after creating an IAM role and configuring the ClusterID. - -You can also [migrate from an in-tree to an out-of-tree AWS cloud provider](#migrating-to-the-out-of-tree-aws-cloud-provider-for-rke) on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. - -Starting with Kubernetes 1.23, you must deactivate the `CSIMigrationAWS` feature gate to use the in-tree AWS cloud provider. You can do this by setting `feature-gates=CSIMigrationAWS=false` as an additional argument for the cluster's Kubelet, Controller Manager, API Server and Scheduler in the advanced cluster configuration. - -::: - ### 1. Create an IAM Role and attach to the instances All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. 
See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: @@ -175,7 +175,112 @@ Do not tag a resource with multiple owned or shared tags. The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. -### Using the Out-of-Tree AWS Cloud Provider for RKE +### Using the Out-of-Tree AWS Cloud Provider + + + + +1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for the cloud provider to find the instance correctly. + +2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: + + +**Override on Etcd:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/etcd-role + operator: In + values: + - 'true' +``` + +**Override on Control Plane:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + disable-cloud-controller: true + kube-apiserver-arg: + - cloud-provider=external + kube-controller-manager-arg: + - cloud-provider=external + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +**Override on Worker:** + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kubelet-arg: + - cloud-provider=external + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/worker-role + operator: In + values: + - 'true' +``` + +2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. + +3. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: + +```yaml +spec: + rkeConfig: + additionalManifest: |- + apiVersion: helm.cattle.io/v1 + kind: HelmChart + metadata: + name: aws-cloud-controller-manager + namespace: kube-system + spec: + chart: aws-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-aws + targetNamespace: kube-system + bootstrap: true + valuesContent: |- + hostNetworking: true + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + args: + - --configure-cloud-routes=false + - --v=5 + - --cloud-provider=aws +``` + + + + 1. [Node name conventions and other prerequisites ](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed so that the cloud provider can find the instance. 
Rancher provisioned clusters don't support configuring `providerID`. @@ -200,7 +305,7 @@ You must not enable `useInstanceMetadataHostname` when setting custom values for Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and enables `useInstanceMetadataHostname`. As mentioned in step 1, enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`. -::: note +:::note You must disable `useInstanceMetadataHostname` when setting a custom node name for custom clusters via `node-name`. @@ -219,7 +324,13 @@ Existing clusters that use an **External** cloud provider will set `--cloud-prov Refer to the offical AWS upstream documentation for the [cloud controller manager](https://kubernetes.github.io/cloud-provider-aws). -#### Helm Chart Installation from CLI + + + +### Helm Chart Installation from CLI + + + Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. @@ -230,7 +341,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`: +2. Create a `values.yaml` file with the following contents to override the default `values.yaml`: ```yaml # values.yaml @@ -335,7 +446,7 @@ clusterRoleRules: 3. Install the Helm chart: ```shell -helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` Verify that the Helm chart installed successfully: @@ -344,269 +455,15 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: - -```shell -kubectl edit daemonset aws-cloud-controller-manager -n kube-system -``` - -5. (Optional) Verify that the cloud controller manager update succeeded: - -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager -``` - -#### Helm Chart Installation from UI - -1. Click **☰**, then select the name of the cluster from the left navigation. - -2. Select **Apps** > **Repositories**. - -3. Click the **Create** button. - -4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. - -5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. - -6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. - -7. Add the following container arguments: - -```yaml - - '--use-service-account-credentials=true' - - '--configure-cloud-routes=false' -``` - -8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: - -```yaml - - apiGroups: - - '' - resources: - - serviceaccounts - verbs: - - create - - get -``` - -9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. 
Update tolerations and the nodeSelector: - -```yaml -tolerations: - - effect: NoSchedule - key: node.cloudprovider.kubernetes.io/uninitialized - value: 'true' - - effect: NoSchedule - value: 'true' - key: node-role.kubernetes.io/controlplane - -``` - -```yaml -nodeSelector: - node-role.kubernetes.io/controlplane: 'true' -``` - -:::note - -There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: - -``` yaml -nodeSelector: - node-role.kubernetes.io/controlplane: 'true' -``` - -::: - -10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: - -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager -``` - -### Migrating to the Out-of-Tree AWS Cloud Provider for RKE - -To migrate from an in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. - -If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. - -If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. - -:::note Important - -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) mentions that it is possible to migrate with the same Kubernetes version, but assumes that migration is part of a Kubernetes upgrade. - -Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). - -::: - -1. Update the cluster config to enable leader migration in `cluster.yml`: - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -Note that the cloud provider is still `aws` at this step: - -```yaml -cloud_provider: - name: aws -``` - -2. Cordon the control plane nodes, so that the AWS cloud controller pods run on nodes only after upgrading to the external cloud provider. - -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" -``` - -3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. 
To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): - -```yaml -- '--enable-leader-migration=true' -``` - -4. Confirm that the chart is installed but the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will uncordon each node after upgrading and `aws-controller-manager` pods will be scheduled. - -5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller. - -Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if not you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query ec2 metadata service and set `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: - -```yaml -rancher_kubernetes_engine_config: - cloud_provider: - name: external-aws - useInstanceMetadataHostname: true/false -``` - -**Remove** `enable-leader-migration` from: - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. - -7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. - -8. (Optional) After the upgrade, leader migration is no longer required due to only one cloud-controller-manager and can be removed. Upgrade the chart and remove the following section from the container arguments: - -```shell -- --enable-leader-migration=true -``` - -Verify the cloud controller manager update was successfully rolled out with the following command: +4. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -### Using Out-of-tree AWS Cloud Provider for RKE2 + -1. [Node name conventions and other prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) must be followed for cloud provider to find the instance correctly. - -2. Rancher managed RKE2/K3s clusters don't support configuring `providerID`. 
However, the engine will set the node name correctly if the following configuration is set on the provisioning cluster object: - -```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: aws -``` - -This option will be passed to the configuration of the various Kubernetes components that run on the node, and must be overridden per component to prevent the in-tree provider from running unintentionally: - - -**Override on Etcd:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/etcd-role - operator: In - values: - - 'true' -``` - -**Override on Control Plane:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - disable-cloud-controller: true - kube-apiserver-arg: - - cloud-provider=external - kube-controller-manager-arg: - - cloud-provider=external - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' -``` - -**Override on Worker:** - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kubelet-arg: - - cloud-provider=external - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/worker-role - operator: In - values: - - 'true' -``` - -2. Select `Amazon` if relying on the above mechanism to set the provider ID. Otherwise, select **External (out-of-tree)** cloud provider, which sets `--cloud-provider=external` for Kubernetes components. - -3. Specify the `aws-cloud-controller-manager` Helm chart as an additional manifest to install: - -```yaml -spec: - rkeConfig: - additionalManifest: |- - apiVersion: helm.cattle.io/v1 - kind: HelmChart - metadata: - name: aws-cloud-controller-manager - namespace: kube-system - spec: - chart: aws-cloud-controller-manager - repo: https://kubernetes.github.io/cloud-provider-aws - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - hostNetworking: true - nodeSelector: - node-role.kubernetes.io/control-plane: "true" - args: - - --configure-cloud-routes=false - - --v=5 - - --cloud-provider=aws -``` - -#### Helm Chart Installation from CLI + Official upstream docs for [Helm chart installation](https://github.com/kubernetes/cloud-provider-aws/tree/master/charts/aws-cloud-controller-manager) can be found on Github. @@ -617,7 +474,7 @@ helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-pr helm repo update ``` -2. Create a `values.yaml` file with the following contents to override the default `values.yaml`: +2. Create a `values.yaml` file with the following contents, to override the default `values.yaml`: ```yaml # values.yaml @@ -722,7 +579,7 @@ clusterRoleRules: 3. Install the Helm chart: ```shell -helm upgrade --install aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml +helm upgrade --install aws-cloud-controller-manager -n kube-system aws-cloud-controller-manager/aws-cloud-controller-manager --values values.yaml ``` Verify that the Helm chart installed successfully: @@ -731,13 +588,25 @@ Verify that the Helm chart installed successfully: helm status -n kube-system aws-cloud-controller-manager ``` -4. (Optional) Verify that the cloud controller manager update succeeded: +4. 
If present, edit the Daemonset to remove the default node selector `node-role.kubernetes.io/control-plane: ""`: + +```shell +kubectl edit daemonset aws-cloud-controller-manager -n kube-system +``` + +5. (Optional) Verify that the cloud controller manager update succeeded: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -#### Helm Chart Installation from UI + + + +### Helm Chart Installation from UI + + + 1. Click **☰**, then select the name of the cluster from the left navigation. @@ -801,184 +670,75 @@ nodeSelector: 10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` is running. Verify `aws-cloud-controller-manager` pods are running in target namespace (`kube-system` unless modified in step 6). -### Migrating to the Out-of-Tree AWS Cloud Provider - -To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. - -If it's acceptable to have some downtime, you can [switch to an external cloud provider](#using-the-out-of-tree-aws-cloud-provider), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + -If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + -:::note Important: -The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). -::: - -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE - -1. Update the cluster config to enable leader migration in `cluster.yml` - -```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" -``` - -Note that the cloud provider is still `aws` at this step. - -```yaml -cloud_provider: - name: aws -``` - -2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: +1. Click **☰**, then select the name of the cluster from the left navigation. -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" -``` +2. Select **Apps** > **Repositories**. -3. 
To install the AWS cloud controller manager, enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](#helm-chart-installation-from-ui): +3. Click the **Create** button. -```shell -- '--enable-leader-migration=true' -``` +4. Enter `https://kubernetes.github.io/cloud-provider-aws` in the **Index URL** field. -4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule `aws-controller-manager` pods. +5. Select **Apps** > **Charts** from the left navigation and install **aws-cloud-controller-manager**. -5. Update cluster.yml to change the cloud provider and remove the leader migration arguments from the kube-controller. +6. Select the namespace, `kube-system`, and enable **Customize Helm options before install**. -Selecting **External Amazon (out-of-tree)** will set `--cloud-provider=external` and allow enabling `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` queries the EC2 metadata service and sets `/hostname` as `hostname-override` for `kubelet` and `kube-proxy`: +7. Add the following container arguments: ```yaml -rancher_kubernetes_engine_config: - cloud_provider: - name: external-aws - useInstanceMetadataHostname: true/false -``` + - '--use-service-account-credentials=true' + - '--configure-cloud-routes=false' +``` -Remove `enable-leader-migration` if you don't want it enabled in your cluster: +8. Add `get` to `verbs` for `serviceaccounts` resources in `clusterRoleRules`. This allows the cloud controller manager to get service accounts upon startup: ```yaml -services: - kube-controller: - extra_args: - enable-leader-migration: "true" + - apiGroups: + - '' + resources: + - serviceaccounts + verbs: + - create + - get ``` -:::tip -You can also disable leader migration after step 7. Upgrade the chart and remove the following section from the container arguments: +9. Rancher-provisioned RKE nodes are tainted `node-role.kubernetes.io/controlplane`. Update tolerations and the nodeSelector: ```yaml -- --enable-leader-migration=true -``` - -::: - -6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well. - -7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running. - -#### Migrating to the Out-of-Tree AWS Cloud Provider for RKE2 - -1. Update the cluster config to enable leader migration: +tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: 'true' + - effect: NoSchedule + value: 'true' + key: node-role.kubernetes.io/controlplane -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kube-controller-manager-arg: - - enable-leader-migration - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' ``` -Note that the cloud provider is still `aws` at this step: - ```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: aws -``` - -2. 
Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: - -```shell -kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' ``` -3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](#using-out-of-tree-aws-cloud-provider-for-rke2) -From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. -Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: - -```shell -- '--enable-leader-migration=true' -``` +:::note -4. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: +There's currently a [known issue](https://github.com/rancher/dashboard/issues/9249) where `nodeSelector` can't be updated from the Rancher UI. Continue installing the chart and then Daemonset manually to set the `nodeSelector`: -```shell -kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` yaml +nodeSelector: + node-role.kubernetes.io/controlplane: 'true' ``` -5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. -If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file - -:::note Important - -Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. - ::: -Remove `enable-leader-migration` if you don't want it enabled in your cluster: - -```yaml -spec: - rkeConfig: - machineGlobalConfig: - cloud-provider-name: external -``` - -Remove `enable-leader-migration` from: - -```yaml -spec: - rkeConfig: - machineSelectorConfig: - - config: - kube-controller-manager-arg: - - enable-leader-migration - machineLabelSelector: - matchExpressions: - - key: rke.cattle.io/control-plane-role - operator: In - values: - - 'true' -``` - -:::tip -You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. -Upgrade the chart and remove the following section from the container arguments: - -```yaml -- --enable-leader-migration=true -``` - -Verify the cloud controller manager update was successfully rolled out with the following command: +10. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` deploys successfully: ```shell kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager ``` -::: - -6. The cloud provider is responsible for setting the ProviderID of the node. 
Check if all nodes are initialized with the ProviderID: - -```shell -kubectl describe nodes | grep "ProviderID" -``` + + diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md new file mode 100644 index 000000000000..c65bef6ec155 --- /dev/null +++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon.md @@ -0,0 +1,196 @@ +--- +title: Migrating Amazon In-tree to Out-of-tree +--- + + + + + +Kubernetes is moving away from maintaining cloud providers in-tree. In Kubernetes 1.27 and later, the in-tree cloud providers have been removed. + +You can migrate from an in-tree to an out-of-tree AWS cloud provider on Kubernetes 1.26 and earlier. All existing clusters must migrate prior to upgrading to v1.27 in order to stay functional. + +To migrate from the in-tree cloud provider to the out-of-tree AWS cloud provider, you must stop the existing cluster's kube controller manager and install the AWS cloud controller manager. There are many ways to do this. Refer to the official AWS documentation on the [external cloud controller manager](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for details. + +If it's acceptable to have some downtime, you can [switch to an external cloud provider](./amazon.md#using-the-out-of-tree-aws-cloud-provider-for-rke), which removes in-tree components and then deploy charts to install the AWS cloud controller manager. + +If your setup can't tolerate any control plane downtime, you must enable leader migration. This facilitates a smooth transition from the controllers in the kube controller manager to their counterparts in the cloud controller manager. Refer to the official AWS documentation on [Using leader migration](https://cloud-provider-aws.sigs.k8s.io/getting_started/) for more details. + +:::note Important: +The Kubernetes [cloud controller migration documentation](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#before-you-begin) states that it's possible to migrate with the same Kubernetes version, but assumes that the migration is part of a Kubernetes upgrade. Refer to the Kubernetes documentation on [migrating to use the cloud controller manager](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/) to see if you need to customize your setup before migrating. Confirm your [migration configuration values](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#default-configuration). If your cloud provider provides an implementation of the Node IPAM controller, you also need to [migrate the IPAM controller](https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/#node-ipam-controller-migration). +::: + + + + +1. Update the cluster config to enable leader migration: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +Note that the cloud provider is still `aws` at this step: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: aws +``` + +2. 
Cordon control plane nodes so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager with leader migration enabled, follow Steps 1-3 for [deploying the cloud controller manager chart](./amazon.md#using-out-of-tree-aws-cloud-provider-for-rke2) +From Kubernetes 1.22 onwards, the kube-controller-manager will utilize a default configuration which will satisfy the controller-to-manager migration. +Update container args of the `aws-cloud-controller-manager` under `spec.rkeConfig.additionalManifest` to enable leader migration: + +```shell +- '--enable-leader-migration=true' +``` + +4. Install the chart and confirm that the Daemonset `aws-cloud-controller-manager` successfully deployed: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +5. Update the provisioning cluster to change the cloud provider and remove leader migration args from the kube controller. +If upgrading the Kubernetes version, set the Kubernetes version as well in the `spec.kubernetesVersion` section of the cluster YAML file + +:::note Important + +Only remove `cloud-provider-name: aws` if not relying on the rke2 supervisor to correctly set the providerID. + +::: + +Remove `enable-leader-migration` if you don't want it enabled in your cluster: + +```yaml +spec: + rkeConfig: + machineGlobalConfig: + cloud-provider-name: external +``` + +Remove `enable-leader-migration` from: + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + kube-controller-manager-arg: + - enable-leader-migration + machineLabelSelector: + matchExpressions: + - key: rke.cattle.io/control-plane-role + operator: In + values: + - 'true' +``` + +:::tip +You can also disable leader migration after the upgrade, as leader migration is no longer required due to only one cloud-controller-manager and can be removed. +Upgrade the chart and remove the following section from the container arguments: + +```yaml +- --enable-leader-migration=true +``` +::: + +Verify the cloud controller manager update was successfully rolled out with the following command: + +```shell +kubectl rollout status daemonset -n kube-system aws-cloud-controller-manager +``` + +6. The cloud provider is responsible for setting the ProviderID of the node. Check if all nodes are initialized with the ProviderID: + +```shell +kubectl describe nodes | grep "ProviderID" +``` + + + + + +1. Update the cluster config to enable leader migration in `cluster.yml`: + +```yaml +services: + kube-controller: + extra_args: + enable-leader-migration: "true" +``` + +Note that the cloud provider is still `aws` at this step: + +```yaml +cloud_provider: + name: aws +``` + +2. Cordon the control plane nodes, so that AWS cloud controller pods run on nodes only after upgrading to the external cloud provider: + +```shell +kubectl cordon -l "node-role.kubernetes.io/controlplane=true" +``` + +3. To install the AWS cloud controller manager, you must enable leader migration and follow the same steps as when installing AWS on a new cluster. To enable leader migration, add the following to the container arguments in step 7 while following the [steps to install the chart](./amazon.md#helm-chart-installation-from-ui-for-rke): + +```yaml +- '--enable-leader-migration=true' +``` + +4. Confirm that the chart is installed but that the new pods aren't running yet due to cordoned controlplane nodes. 
After updating the cluster in the next step, RKE will upgrade and uncordon each node, and schedule the `aws-cloud-controller-manager` pods.
+
+5. Update `cluster.yml` to change the cloud provider and remove the leader migration arguments from the kube-controller.
+
+   Selecting **External Amazon (out-of-tree)** sets `--cloud-provider=external` and lets you enable `useInstanceMetadataHostname`. You must enable `useInstanceMetadataHostname` for node-driver clusters, and for custom clusters if you don't provide a custom node name via `--node-name`. Enabling `useInstanceMetadataHostname` will query the EC2 metadata service and set `http://169.254.169.254/latest/meta-data/hostname` as `hostname-override` for `kubelet` and `kube-proxy`:
+
+```yaml
+rancher_kubernetes_engine_config:
+  cloud_provider:
+    name: external-aws
+    useInstanceMetadataHostname: true/false
+```
+
+   Remove `enable-leader-migration` if you don't want it enabled in your cluster:
+
+   ```yaml
+   services:
+     kube-controller:
+       extra_args:
+         enable-leader-migration: "true"
+   ```
+
+:::tip
+You can also disable leader migration after you finish the migration. Upgrade the chart and remove the following section from the container arguments:
+
+```yaml
+- --enable-leader-migration=true
+```
+:::
+
+6. If you're upgrading the cluster's Kubernetes version, set the Kubernetes version as well.
+
+7. Update the cluster. The `aws-cloud-controller-manager` pods should now be running.
+
+
+
 diff --git a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md
similarity index 98%
rename from versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
rename to versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md
index 3eb227d79ec9..a3bc9b89d2db 100644
--- a/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree.md
+++ b/versioned_docs/version-2.8/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere.md
@@ -1,5 +1,5 @@
 ---
-title: Migrating vSphere In-tree Volumes to Out-of-tree
+title: Migrating vSphere In-tree to Out-of-tree
 ---
 
diff --git a/versioned_sidebars/version-2.8-sidebars.json b/versioned_sidebars/version-2.8-sidebars.json
index 0e0f3c923c83..6a1f4ede4dbc 100644
--- a/versioned_sidebars/version-2.8-sidebars.json
+++ b/versioned_sidebars/version-2.8-sidebars.json
@@ -464,11 +464,12 @@
       },
       "items": [
         "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/amazon",
+        "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-amazon",
         "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/azure",
         "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/google-compute-engine",
         "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-in-tree-vsphere",
         "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/configure-out-of-tree-vsphere",
-        "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-from-in-tree-to-out-of-tree"
+        "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-cloud-providers/migrate-to-out-of-tree-vsphere"
       ]
     },
     "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters"