From 4a13d7e2c383c7d2235c233dbca5b6a1b979253b Mon Sep 17 00:00:00 2001
From: Paco Xu
Date: Fri, 13 May 2022 14:53:54 +0800
Subject: [PATCH 1/3] bump image version to v0.0.3-dev

---
 Makefile                          | 2 +-
 config/manager/kustomization.yaml | 2 +-
 config/manager/manager.yaml       | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Makefile b/Makefile
index a93f42b..010403d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 # Image URL to use all building/pushing image targets
-IMG ?= daocloud.io/daocloud/kubeadm-operator:v0.0.2
+IMG ?= daocloud.io/daocloud/kubeadm-operator:v0.0.3-dev
 
 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
 CRD_OPTIONS ?= "crd"
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 08e8194..97b93db 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -5,4 +5,4 @@ kind: Kustomization
 images:
 - name: controller
   newName: daocloud.io/daocloud/kubeadm-operator
-  newTag: v0.0.2
+  newTag: v0.0.3-dev
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 6a8b0c5..a36255b 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -39,7 +39,7 @@ spec:
         valueFrom:
           fieldRef:
             fieldPath: metadata.namespace
-        image: daocloud.io/daocloud/kubeadm-operator:v0.0.2
+        image: daocloud.io/daocloud/kubeadm-operator:v0.0.3-dev
         name: manager
         resources:
           limits:

From 54d84d57699f259bbab5358b424b21d23ccd1843 Mon Sep 17 00:00:00 2001
From: Paco Xu
Date: Thu, 19 May 2022 18:57:00 +0800
Subject: [PATCH 2/3] add kubernetesVersion and dryRun to the upgrade command
 specs

---
 api/v1alpha1/command_descriptor_types.go    | 21 ++++++--
 .../operator_v1alpha1_runtimetaskgroup.yaml |  2 +-
 controllers/util.go                         |  2 +-
 operations/renewcertificates.go             |  2 +-
 operations/upgrade.go                       | 42 +++++++++++----
 operations/util.go                          | 51 +++++++++++++++++--
 6 files changed, 98 insertions(+), 22 deletions(-)

diff --git a/api/v1alpha1/command_descriptor_types.go b/api/v1alpha1/command_descriptor_types.go
index cba0ed3..027ff0d 100644
--- a/api/v1alpha1/command_descriptor_types.go
+++ b/api/v1alpha1/command_descriptor_types.go
@@ -19,7 +19,6 @@ package v1alpha1
 // CommandDescriptor represents a command to be performed.
 // Only one of its members may be specified.
 type CommandDescriptor struct {
-
 	// +optional
 	KubeadmRenewCertificates *KubeadmRenewCertsCommandSpec `json:"kubeadmRenewCertificates,omitempty"`
@@ -66,6 +65,10 @@ type PreflightCommandSpec struct {
 
 // UpgradeKubeadmCommandSpec provides...
 type UpgradeKubeadmCommandSpec struct {
+	// KubernetesVersion specifies the target Kubernetes version.
+	// If the version is empty, this command is skipped.
+	// +optional
+	KubernetesVersion string `json:"kubernetesVersion,omitempty"`
 
 	// INSERT ADDITIONAL SPEC FIELDS -
 	// Important: Run "make" to regenerate code after modifying this file
 }
 
 // KubeadmUpgradeApplyCommandSpec provides...
 type KubeadmUpgradeApplyCommandSpec struct {
-
-	// INSERT ADDITIONAL SPEC FIELDS -
-	// Important: Run "make" to regenerate code after modifying this file
+	// KubernetesVersion specifies the target Kubernetes version.
+	// If the version is empty, this command is skipped.
+	// +optional
+	KubernetesVersion string `json:"kubernetesVersion,omitempty"`
+
+	// DryRun tells whether to run `kubeadm upgrade apply` in dry-run mode.
+	// +optional
+	DryRun bool `json:"dryRun,omitempty"`
 }
 
+// TODO: download the binaries for the specified version and replace them on the node
 // KubeadmUpgradeNodeCommandSpec provides...
 type KubeadmUpgradeNodeCommandSpec struct {
-
 	// INSERT ADDITIONAL SPEC FIELDS -
 	// Important: Run "make" to regenerate code after modifying this file
 }
@@ -99,8 +105,13 @@ type KubectlUncordonCommandSpec struct {
 	// Important: Run "make" to regenerate code after modifying this file
 }
 
+// TODO: download the binaries for the specified version and replace them on the node
 // UpgradeKubeletAndKubeactlCommandSpec provides...
 type UpgradeKubeletAndKubeactlCommandSpec struct {
+	// KubernetesVersion specifies the target Kubernetes version.
+	// If the version is empty, this command is skipped.
+	// +optional
+	KubernetesVersion string `json:"kubernetesVersion,omitempty"`
 
 	// INSERT ADDITIONAL SPEC FIELDS -
 	// Important: Run "make" to regenerate code after modifying this file
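
Note: the TODO above ("download the binaries for the specified version") is not implemented anywhere in this series. A minimal sketch of one possible approach, assuming the standard dl.k8s.io release URL layout; downloadK8sBinary is a hypothetical helper name, and the final swap of the binary into place on the node is intentionally left out:

package commands

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime"
)

// downloadK8sBinary is hypothetical: it fetches a release binary such as
// kubelet, kubectl, or kubeadm for the given version (e.g. "v1.24.0") and
// stages it under /tmp; replacing the binary on the node is not shown.
func downloadK8sBinary(name, version string) (string, error) {
	url := fmt.Sprintf("https://dl.k8s.io/release/%s/bin/linux/%s/%s", version, runtime.GOARCH, name)
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("downloading %s: unexpected status %s", url, resp.Status)
	}
	dst := fmt.Sprintf("/tmp/%s-%s", name, version)
	out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o755)
	if err != nil {
		return "", err
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		return "", err
	}
	return dst, nil
}
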
diff --git a/config/samples/operator_v1alpha1_runtimetaskgroup.yaml b/config/samples/operator_v1alpha1_runtimetaskgroup.yaml
index 481c3d2..137489b 100644
--- a/config/samples/operator_v1alpha1_runtimetaskgroup.yaml
+++ b/config/samples/operator_v1alpha1_runtimetaskgroup.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   nodeSelector:
     matchLabels:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
   selector:
     matchLabels:
       app: a
diff --git a/controllers/util.go b/controllers/util.go
index f6c397d..a203449 100644
--- a/controllers/util.go
+++ b/controllers/util.go
@@ -119,7 +119,7 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
 			Spec: corev1.PodSpec{
 				Tolerations: []corev1.Toleration{
 					{
-						Key:    "node-role.kubernetes.io/master",
+						Key:    "node-role.kubernetes.io/control-plane",
 						Effect: corev1.TaintEffectNoSchedule,
 					},
 				},
diff --git a/operations/renewcertificates.go b/operations/renewcertificates.go
index 568a61a..403ecb6 100644
--- a/operations/renewcertificates.go
+++ b/operations/renewcertificates.go
@@ -22,7 +22,7 @@ import (
 
 func setupRenewCertificates() map[string]string {
 	return map[string]string{
-		"node-role.kubernetes.io/master": "",
+		"node-role.kubernetes.io/control-plane": "",
 	}
 }
diff --git a/operations/upgrade.go b/operations/upgrade.go
index e606684..efa73e7 100644
--- a/operations/upgrade.go
+++ b/operations/upgrade.go
@@ -26,52 +26,74 @@ func setupUpgrade() map[string]string {
 
 func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperationSpec) *operatorv1.RuntimeTaskGroupList {
 	var items []operatorv1.RuntimeTaskGroup
+	dryRun := operation.Spec.GetTypedOperationExecutionMode() == operatorv1.OperationExecutionModeDryRun
 
-	t1 := createBasicTaskGroup(operation, "01", "upgrade-cp-1")
+	t1 := createUpgradeApplyTaskGroup(operation, "01", "upgrade-apply")
 	setCP1Selector(&t1)
+	// run `kubeadm upgrade apply` on the first control-plane node
 	t1.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
 
 	t1.Spec.Template.Spec.Commands = append(t1.Spec.Template.Spec.Commands,
 		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 		operatorv1.CommandDescriptor{
-			KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{},
+			KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
+				DryRun:            dryRun,
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 	)
 	items = append(items, t1)
 
-	t2 := createBasicTaskGroup(operation, "02", "upgrade-cp-n")
-	setCPNSelector(&t2)
+	// this can be skipped if there is only one control-plane node.
+	// currently this depends on the node selector
+	t2 := createBasicTaskGroup(operation, "02", "upgrade-cp")
+	setCPNSelector(&t2)
 	t2.Spec.Template.Spec.Commands = append(t2.Spec.Template.Spec.Commands,
 		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 		operatorv1.CommandDescriptor{
 			KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{},
 		},
 		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 	)
 	items = append(items, t2)
 
+	// this can be skipped if there are no worker nodes.
+	// currently this depends on the node selector
 	t3 := createBasicTaskGroup(operation, "03", "upgrade-w")
 	setWSelector(&t3)
+
 	t3.Spec.Template.Spec.Commands = append(t3.Spec.Template.Spec.Commands,
 		operatorv1.CommandDescriptor{
 			KubectlDrain: &operatorv1.KubectlDrainCommandSpec{},
 		},
 		operatorv1.CommandDescriptor{
-			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+			UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 		operatorv1.CommandDescriptor{
 			KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{},
 		},
 		operatorv1.CommandDescriptor{
-			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+			UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+				KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+			},
 		},
 		operatorv1.CommandDescriptor{
 			KubectlUncordon: &operatorv1.KubectlUncordonCommandSpec{},
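
Note: with the changes above, planUpgrade threads the target version into every command and derives dry-run from the operation's execution mode. A sketch of driving it end to end; the ExecutionMode field name is an assumption here, since this series only shows the GetTypedOperationExecutionMode() accessor:

package operations

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

// planDryRunUpgrade sketches a dry-run upgrade to v1.24.0; it should yield
// three task groups: upgrade-apply, upgrade-cp, and upgrade-w.
func planDryRunUpgrade() *operatorv1.RuntimeTaskGroupList {
	op := &operatorv1.Operation{
		ObjectMeta: metav1.ObjectMeta{Name: "upgrade-to-v1-24"},
		Spec: operatorv1.OperationSpec{
			// assumed to be the raw field behind GetTypedOperationExecutionMode()
			ExecutionMode: string(operatorv1.OperationExecutionModeDryRun),
			Upgrade: &operatorv1.UpgradeOperationSpec{
				KubernetesVersion: "v1.24.0",
			},
		},
	}
	return planUpgrade(op, op.Spec.Upgrade)
}
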
diff --git a/operations/util.go b/operations/util.go
index c533dea..3d80480 100644
--- a/operations/util.go
+++ b/operations/util.go
@@ -24,6 +24,47 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+func createUpgradeApplyTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder string, taskdeploymentName string) operatorv1.RuntimeTaskGroup {
+	gv := operatorv1.GroupVersion
+
+	labels := map[string]string{}
+	for k, v := range operation.Labels {
+		labels[k] = v
+	}
+	labels[operatorv1.TaskGroupNameLabel] = taskdeploymentName
+	labels[operatorv1.TaskGroupOrderLabel] = taskdeploymentOrder
+
+	return operatorv1.RuntimeTaskGroup{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       gv.WithKind("TaskGroup").Kind,
+			APIVersion: gv.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            fmt.Sprintf("%s-%s-%s", operation.Name, taskdeploymentOrder, taskdeploymentName), // TODO: use GenerateName?
+			Labels:          labels,
+			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(operation, operation.GroupVersionKind())},
+		},
+		Spec: operatorv1.RuntimeTaskGroupSpec{
+			Selector: metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: operatorv1.RuntimeTaskTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:            labels,
+					CreationTimestamp: metav1.Now(),
+				},
+				Spec: operatorv1.RuntimeTaskSpec{
+					Commands: []operatorv1.CommandDescriptor{},
+				},
+			},
+		},
+		Status: operatorv1.RuntimeTaskGroupStatus{
+			Phase: string(operatorv1.OperationPhasePending),
+		},
+	}
+}
+
 func createBasicTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder string, taskdeploymentName string) operatorv1.RuntimeTaskGroup {
 	gv := operatorv1.GroupVersion
 
@@ -64,10 +105,12 @@ func createBasicTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder s
 	}
 }
 
+// the control-plane node role label is used since v1.20.
+// to support older versions, manually label `master` nodes with the control-plane role.
 func setCPSelector(t *operatorv1.RuntimeTaskGroup) {
 	t.Spec.NodeSelector = metav1.LabelSelector{
 		MatchLabels: map[string]string{
-			"node-role.kubernetes.io/master": "",
+			"node-role.kubernetes.io/control-plane": "",
 		},
 	}
 }
@@ -75,7 +118,7 @@ func setCPSelector(t *operatorv1.RuntimeTaskGroup) {
 func setCP1Selector(t *operatorv1.RuntimeTaskGroup) {
 	t.Spec.NodeSelector = metav1.LabelSelector{
 		MatchLabels: map[string]string{
-			"node-role.kubernetes.io/master": "",
+			"node-role.kubernetes.io/control-plane": "",
 		},
 	}
 	t.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
@@ -84,7 +127,7 @@ func setCP1Selector(t *operatorv1.RuntimeTaskGroup) {
 func setCPNSelector(t *operatorv1.RuntimeTaskGroup) {
 	t.Spec.NodeSelector = metav1.LabelSelector{
 		MatchLabels: map[string]string{
-			"node-role.kubernetes.io/master": "",
+			"node-role.kubernetes.io/control-plane": "",
 		},
 	}
 	t.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterTail)
@@ -94,7 +137,7 @@ func setWSelector(t *operatorv1.RuntimeTaskGroup) {
 	t.Spec.NodeSelector = metav1.LabelSelector{
 		MatchExpressions: []metav1.LabelSelectorRequirement{
 			{
-				Key:      "node-role.kubernetes.io/master",
+				Key:      "node-role.kubernetes.io/control-plane",
 				Operator: metav1.LabelSelectorOpDoesNotExist,
 			},
 		},
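
Note: createUpgradeApplyTaskGroup mirrors createBasicTaskGroup, so the existing naming and labeling conventions still hold. A short usage sketch; exampleUpgradeApplyGroup is illustrative only:

package operations

import (
	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

// exampleUpgradeApplyGroup shows the conventions of the group returned by
// the new factory.
func exampleUpgradeApplyGroup(op *operatorv1.Operation) operatorv1.RuntimeTaskGroup {
	tg := createUpgradeApplyTaskGroup(op, "01", "upgrade-apply")
	// tg.Name is "<op.Name>-01-upgrade-apply" (see the TODO about GenerateName);
	// tg.Labels carries TaskGroupNameLabel="upgrade-apply" and
	// TaskGroupOrderLabel="01", which tg.Spec.Selector also matches.
	return tg
}
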
From 92b72d0df483090bb4247c856f88de1d9294683a Mon Sep 17 00:00:00 2001
From: Paco Xu
Date: Thu, 19 May 2022 21:44:23 +0800
Subject: [PATCH 3/3] add TODOs for runtime task commands

---
 commands/kubeadm_upgrade_apply.go     | 1 +
 commands/kubeadm_upgrade_node.go      | 1 +
 commands/kubectl_drain.go             | 1 +
 commands/kubectl_uncordon.go          | 1 +
 commands/upgrade_kubeadm.go           | 1 +
 commands/upgrade_kubectlkubelet.go    | 1 +
 controllers/operation_controller.go   | 1 +
 controllers/runtimetask_controller.go | 2 +-
 8 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/commands/kubeadm_upgrade_apply.go b/commands/kubeadm_upgrade_apply.go
index 630e67e..5efee9b 100644
--- a/commands/kubeadm_upgrade_apply.go
+++ b/commands/kubeadm_upgrade_apply.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement "kubeadm upgrade apply"
 func runKubeadmUpgradeApply(spec *operatorv1.KubeadmUpgradeApplyCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/commands/kubeadm_upgrade_node.go b/commands/kubeadm_upgrade_node.go
index caefe67..4d9ba11 100644
--- a/commands/kubeadm_upgrade_node.go
+++ b/commands/kubeadm_upgrade_node.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement "kubeadm upgrade node"
 func runKubeadmUpgradeNode(spec *operatorv1.KubeadmUpgradeNodeCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/commands/kubectl_drain.go b/commands/kubectl_drain.go
index 54db2f6..4905de7 100644
--- a/commands/kubectl_drain.go
+++ b/commands/kubectl_drain.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement "kubectl drain"
 func runKubectlDrain(spec *operatorv1.KubectlDrainCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/commands/kubectl_uncordon.go b/commands/kubectl_uncordon.go
index 82af0c5..a7cdfab 100644
--- a/commands/kubectl_uncordon.go
+++ b/commands/kubectl_uncordon.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement "kubectl uncordon"
 func runKubectlUncordon(spec *operatorv1.KubectlUncordonCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/commands/upgrade_kubeadm.go b/commands/upgrade_kubeadm.go
index 74bf090..841406e 100644
--- a/commands/upgrade_kubeadm.go
+++ b/commands/upgrade_kubeadm.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement the kubeadm binary upgrade
 func runUpgradeKubeadm(spec *operatorv1.UpgradeKubeadmCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/commands/upgrade_kubectlkubelet.go b/commands/upgrade_kubectlkubelet.go
index 40145a7..27e4625 100644
--- a/commands/upgrade_kubectlkubelet.go
+++ b/commands/upgrade_kubectlkubelet.go
@@ -22,6 +22,7 @@ import (
 	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
 )
 
+// TODO: temporary no-op so the task flow can proceed; implement the kubelet and kubectl upgrade
 func runUpgradeKubectlAndKubelet(spec *operatorv1.UpgradeKubeletAndKubeactlCommandSpec, log logr.Logger) error {
 	return nil
 }
diff --git a/controllers/operation_controller.go b/controllers/operation_controller.go
index 23b1f44..fecbc94 100644
--- a/controllers/operation_controller.go
+++ b/controllers/operation_controller.go
@@ -131,6 +131,7 @@ func (r *OperationReconciler) reconcileDaemonSet(operation *operatorv1.Operation
 		return nil
 	}
 
+	// the DaemonSet will be created only if the operation is in controlled mode.
 	if !daemonSetShouldBeRunning(operation) {
 		return nil
 	}
diff --git a/controllers/runtimetask_controller.go b/controllers/runtimetask_controller.go
index 0630d4a..55a00a6 100644
--- a/controllers/runtimetask_controller.go
+++ b/controllers/runtimetask_controller.go
@@ -237,10 +237,10 @@ func (r *RuntimeTaskReconciler) reconcileNormal(executionMode operatorv1.Operati
 	}
 
 	// Proceed with the current command execution
-
 	if executionMode == operatorv1.OperationExecutionModeDryRun {
 		// if dry running wait for an arbitrary delay so the user will get a better perception of the Task execution order
 		time.Sleep(3 * time.Second)
+		// TODO: should we collect logs for dry-run?
 	} else {
 		// else we should execute the CurrentCommand
 		log.WithValues("command", task.Status.CurrentCommand).Info("running command")
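
Note: after this series, all six run* commands above are still no-op stubs. A closing sketch of what the apply stub might grow into, assuming the agent pod can invoke the node's kubeadm binary directly (the exec-based approach is an assumption, not the author's confirmed design; --yes and --dry-run are real kubeadm flags):

package commands

import (
	"fmt"
	"os/exec"

	"github.com/go-logr/logr"

	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

// sketch of a possible runKubeadmUpgradeApply implementation.
func runKubeadmUpgradeApply(spec *operatorv1.KubeadmUpgradeApplyCommandSpec, log logr.Logger) error {
	// honor the spec comment: an empty version means the command is skipped.
	if spec.KubernetesVersion == "" {
		log.Info("no kubernetesVersion set, skipping kubeadm upgrade apply")
		return nil
	}
	args := []string{"upgrade", "apply", spec.KubernetesVersion, "--yes"}
	if spec.DryRun {
		args = append(args, "--dry-run")
	}
	out, err := exec.Command("kubeadm", args...).CombinedOutput()
	log.Info("kubeadm upgrade apply", "output", string(out))
	if err != nil {
		return fmt.Errorf("kubeadm upgrade apply failed: %w", err)
	}
	return nil
}
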