This repository has been archived by the owner on Sep 15, 2023. It is now read-only.

Commit

Merge pull request #13 from pacoxu/dev-0.0.3
Dev 0.0.3
pacoxu authored May 19, 2022
2 parents be14d99 + 92b72d0 commit 6a64d3d
Showing 17 changed files with 109 additions and 26 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@

# Image URL to use all building/pushing image targets
-IMG ?= daocloud.io/daocloud/kubeadm-operator:v0.0.2
+IMG ?= daocloud.io/daocloud/kubeadm-operator:v0.0.3-dev
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd"

21 changes: 16 additions & 5 deletions api/v1alpha1/command_descriptor_types.go
@@ -19,7 +19,6 @@ package v1alpha1
// CommandDescriptor represents a command to be performed.
// Only one of its members may be specified.
type CommandDescriptor struct {
-
// +optional
KubeadmRenewCertificates *KubeadmRenewCertsCommandSpec `json:"kubeadmRenewCertificates,omitempty"`

@@ -66,21 +65,28 @@ type PreflightCommandSpec struct {

// UpgradeKubeadmCommandSpec provides...
type UpgradeKubeadmCommandSpec struct {
+// +optional
+// KubernetesVersion specifies the target Kubernetes version.
+// If the version is empty, this command is skipped.
+KubernetesVersion string `json:"kubernetesVersion"`

// INSERT ADDITIONAL SPEC FIELDS -
// Important: Run "make" to regenerate code after modifying this file
}

// KubeadmUpgradeApplyCommandSpec provides...
type KubeadmUpgradeApplyCommandSpec struct {
-
-// INSERT ADDITIONAL SPEC FIELDS -
-// Important: Run "make" to regenerate code after modifying this file
+// +optional
+// KubernetesVersion specifies the target Kubernetes version.
+// If the version is empty, this command is skipped.
+KubernetesVersion string `json:"kubernetesVersion"`
+// DryRun enables dry-run mode.
+DryRun bool `json:"dryRun,omitempty"`
}

+// TODO: download the specified version binary and replace it on the node
// KubeadmUpgradeNodeCommandSpec provides...
type KubeadmUpgradeNodeCommandSpec struct {
-
// INSERT ADDITIONAL SPEC FIELDS -
// Important: Run "make" to regenerate code after modifying this file
}
@@ -99,8 +105,13 @@ type KubectlUncordonCommandSpec struct {
// Important: Run "make" to regenerate code after modifying this file
}

+// TODO: download the specified version binary and replace it on the node
// UpgradeKubeletAndKubeactlCommandSpec provides...
type UpgradeKubeletAndKubeactlCommandSpec struct {
+// +optional
+// KubernetesVersion specifies the target Kubernetes version.
+// If the version is empty, this command is skipped.
+KubernetesVersion string `json:"kubernetesVersion"`

// INSERT ADDITIONAL SPEC FIELDS -
// Important: Run "make" to regenerate code after modifying this file
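
Note: the comments on the new KubernetesVersion fields promise that an empty version skips the command. A minimal sketch of how a command runner might honor that contract; the helper name and package layout are assumptions, not part of this commit:

package commands

import (
	"github.com/go-logr/logr"
)

// skipIfNoVersion is a hypothetical helper implementing the documented
// contract: a command whose kubernetesVersion is empty is silently skipped.
func skipIfNoVersion(version string, log logr.Logger) bool {
	if version == "" {
		log.Info("kubernetesVersion is empty, skipping command")
		return true
	}
	return false
}
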
1 change: 1 addition & 0 deletions commands/kubeadm_upgrade_apply.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the "kubeadm upgrade apply" to work
func runKubeadmUpgradeApply(spec *operatorv1.KubeadmUpgradeApplyCommandSpec, log logr.Logger) error {
return nil
}
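
Note: the handler above is still a stub. A rough sketch of one plausible implementation, assuming the agent shells out to the node's kubeadm binary (kubeadm upgrade apply accepts both --yes and --dry-run); the function name is hypothetical:

package commands

import (
	"os/exec"

	"github.com/go-logr/logr"

	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

// Sketch only: assumes kubeadm is on the PATH of the agent pod or host.
func runKubeadmUpgradeApplySketch(spec *operatorv1.KubeadmUpgradeApplyCommandSpec, log logr.Logger) error {
	if spec.KubernetesVersion == "" {
		return nil // documented contract: an empty version skips the command
	}
	args := []string{"upgrade", "apply", spec.KubernetesVersion, "--yes"}
	if spec.DryRun {
		args = append(args, "--dry-run")
	}
	log.Info("running kubeadm", "args", args)
	out, err := exec.Command("kubeadm", args...).CombinedOutput()
	log.Info(string(out))
	return err
}
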
1 change: 1 addition & 0 deletions commands/kubeadm_upgrade_node.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the "kubeadm upgrade node" to work
func runKubeadmUpgradeNode(spec *operatorv1.KubeadmUpgradeNodeCommandSpec, log logr.Logger) error {
return nil
}
1 change: 1 addition & 0 deletions commands/kubectl_drain.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the "kubectl drain" to work
func runKubectlDrain(spec *operatorv1.KubectlDrainCommandSpec, log logr.Logger) error {
return nil
}
1 change: 1 addition & 0 deletions commands/kubectl_uncordon.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the "kubectl uncordon" to work
func runKubectlUncordon(spec *operatorv1.KubectlUncordonCommandSpec, log logr.Logger) error {
return nil
}
1 change: 1 addition & 0 deletions commands/upgrade_kubeadm.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the upgrading kubeadm to work
func runUpgradeKubeadm(spec *operatorv1.UpgradeKubeadmCommandSpec, log logr.Logger) error {
return nil
}
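
Note: per the TODO in the API types, upgrading kubeadm itself means downloading the versioned binary and swapping it in on the node. A rough sketch of the download step, assuming the official dl.k8s.io release URL layout and linux/amd64 nodes; the helper is hypothetical:

package commands

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// downloadKubeadm fetches the kubeadm binary for the requested version from
// the official release bucket and writes it, executable, to dst.
func downloadKubeadm(version, dst string) error {
	url := fmt.Sprintf("https://dl.k8s.io/release/%s/bin/linux/amd64/kubeadm", version)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s for %s", resp.Status, url)
	}
	f, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o755)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}
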
1 change: 1 addition & 0 deletions commands/upgrade_kubectlkubelet.go
@@ -22,6 +22,7 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+// TODO this is a temporary hack to get the kubectl & kubelet upgrade to work
func runUpgradeKubectlAndKubelet(spec *operatorv1.UpgradeKubeletAndKubeactlCommandSpec, log logr.Logger) error {
return nil
}
2 changes: 1 addition & 1 deletion config/manager/kustomization.yaml
@@ -5,4 +5,4 @@ kind: Kustomization
images:
- name: controller
newName: daocloud.io/daocloud/kubeadm-operator
-newTag: v0.0.2
+newTag: v0.0.3-dev
2 changes: 1 addition & 1 deletion config/manager/manager.yaml
@@ -39,7 +39,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-image: daocloud.io/daocloud/kubeadm-operator:v0.0.2
+image: daocloud.io/daocloud/kubeadm-operator:v0.0.3-dev
name: manager
resources:
limits:
2 changes: 1 addition & 1 deletion config/samples/operator_v1alpha1_runtimetaskgroup.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
nodeSelector:
matchLabels:
-node-role.kubernetes.io/master: ""
+node-role.kubernetes.io/control-plane: ""
selector:
matchLabels:
app: a
1 change: 1 addition & 0 deletions controllers/operation_controller.go
@@ -131,6 +131,7 @@ func (r *OperationReconciler) reconcileDaemonSet(operation *operatorv1.Operation
return nil
}

+// The DaemonSet is created only if the operation is in controlled mode.
if !daemonSetShouldBeRunning(operation) {
return nil
}
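
Note: a hypothetical reading of the predicate referenced above. The real daemonSetShouldBeRunning is outside this diff, and OperationExecutionModeControlled is assumed to exist alongside the DryRun mode used elsewhere in this PR:

package controllers

import (
	operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

// Hypothetical sketch, not the actual implementation: the agent DaemonSet is
// needed only when the user drives execution step by step (controlled mode).
func daemonSetShouldBeRunningSketch(operation *operatorv1.Operation) bool {
	return operation.Spec.GetTypedOperationExecutionMode() == operatorv1.OperationExecutionModeControlled
}
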
2 changes: 1 addition & 1 deletion controllers/runtimetask_controller.go
@@ -237,10 +237,10 @@ func (r *RuntimeTaskReconciler) reconcileNormal(executionMode operatorv1.Operati
}

// Proceed with the current command execution
-
if executionMode == operatorv1.OperationExecutionModeDryRun {
// in dry-run mode, wait an arbitrary delay so the user gets a better perception of the Task execution order
time.Sleep(3 * time.Second)
+// TODO: should we collect logs for dry-run?
} else {
// else we should execute the CurrentCommand
log.WithValues("command", task.Status.CurrentCommand).Info("running command")
2 changes: 1 addition & 1 deletion controllers/util.go
@@ -119,7 +119,7 @@ func createDaemonSet(c client.Client, operation *operatorv1.Operation, namespace
Spec: corev1.PodSpec{
Tolerations: []corev1.Toleration{
{
-Key: "node-role.kubernetes.io/master",
+Key: "node-role.kubernetes.io/control-plane",
Effect: corev1.TaintEffectNoSchedule,
},
},
2 changes: 1 addition & 1 deletion operations/renewcertificates.go
@@ -22,7 +22,7 @@ import (

func setupRenewCertificates() map[string]string {
return map[string]string{
"node-role.kubernetes.io/master": "",
"node-role.kubernetes.io/control-plane": "",
}
}

42 changes: 32 additions & 10 deletions operations/upgrade.go
@@ -26,52 +26,74 @@ func setupUpgrade() map[string]string {

func planUpgrade(operation *operatorv1.Operation, spec *operatorv1.UpgradeOperationSpec) *operatorv1.RuntimeTaskGroupList {
var items []operatorv1.RuntimeTaskGroup
+dryRun := operation.Spec.GetTypedOperationExecutionMode() == operatorv1.OperationExecutionModeDryRun

-t1 := createBasicTaskGroup(operation, "01", "upgrade-cp-1")
+t1 := createUpgradeApplyTaskGroup(operation, "01", "upgrade-apply")
setCP1Selector(&t1)
+// run `upgrade apply` on the first control-plane node
+t1.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
t1.Spec.Template.Spec.Commands = append(t1.Spec.Template.Spec.Commands,
operatorv1.CommandDescriptor{
-UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
operatorv1.CommandDescriptor{
-KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{},
+KubeadmUpgradeApply: &operatorv1.KubeadmUpgradeApplyCommandSpec{
+DryRun: dryRun,
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
operatorv1.CommandDescriptor{
-UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
)
items = append(items, t1)

-t2 := createBasicTaskGroup(operation, "02", "upgrade-cp-n")
-setCPNSelector(&t2)
+// This step can be skipped if there is only one control-plane node;
+// currently that depends on the selector.
+t2 := createBasicTaskGroup(operation, "02", "upgrade-cp")
+setWSelector(&t2)
t2.Spec.Template.Spec.Commands = append(t2.Spec.Template.Spec.Commands,
operatorv1.CommandDescriptor{
-UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{},
},
operatorv1.CommandDescriptor{
-UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
)
items = append(items, t2)

+// This step can be skipped if there are no worker nodes;
+// currently that depends on the selector.
t3 := createBasicTaskGroup(operation, "02", "upgrade-w")
setWSelector(&t3)

t3.Spec.Template.Spec.Commands = append(t3.Spec.Template.Spec.Commands,
operatorv1.CommandDescriptor{
KubectlDrain: &operatorv1.KubectlDrainCommandSpec{},
},
operatorv1.CommandDescriptor{
-UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{},
+UpgradeKubeadm: &operatorv1.UpgradeKubeadmCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
operatorv1.CommandDescriptor{
KubeadmUpgradeNode: &operatorv1.KubeadmUpgradeNodeCommandSpec{},
},
operatorv1.CommandDescriptor{
-UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{},
+UpgradeKubeletAndKubeactl: &operatorv1.UpgradeKubeletAndKubeactlCommandSpec{
+KubernetesVersion: operation.Spec.Upgrade.KubernetesVersion,
+},
},
operatorv1.CommandDescriptor{
KubectlUncordon: &operatorv1.KubectlUncordonCommandSpec{},
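
Note: the plan now upgrades in three waves: t1 runs `kubeadm upgrade apply` on the head node matched by the control-plane selector, t2 runs `kubeadm upgrade node` on the nodes matched by its selector, and t3 drains, upgrades, and uncordons workers. A minimal sketch of what the Head/Tail node filters imply, assuming the filter is applied to the ordered list of nodes matched by the selector (the real filtering lives in the task-group controller, outside this diff):

package operations

// applyNodeFilterSketch illustrates the intended Head/Tail semantics.
func applyNodeFilterSketch(nodes []string, filter string) []string {
	if len(nodes) == 0 {
		return nodes
	}
	switch filter {
	case "Head": // only the first matching node, e.g. where `kubeadm upgrade apply` runs
		return nodes[:1]
	case "Tail": // every matching node except the first
		return nodes[1:]
	default: // "All" or unset: act on every matching node
		return nodes
	}
}
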
51 changes: 47 additions & 4 deletions operations/util.go
@@ -24,6 +24,47 @@ import (
operatorv1 "k8s.io/kubeadm/operator/api/v1alpha1"
)

+func createUpgradeApplyTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder string, taskdeploymentName string) operatorv1.RuntimeTaskGroup {
+dryRun := operation.Spec.GetTypedOperationExecutionMode() == operatorv1.OperationExecutionModeDryRun
+gv := operatorv1.GroupVersion
+
+labels := map[string]string{}
+for k, v := range operation.Labels {
+labels[k] = v
+}
+labels[operatorv1.TaskGroupNameLabel] = taskdeploymentName
+labels[operatorv1.TaskGroupOrderLabel] = taskdeploymentOrder
+
+return operatorv1.RuntimeTaskGroup{
+TypeMeta: metav1.TypeMeta{
+Kind: gv.WithKind("TaskGroup").Kind,
+APIVersion: gv.String(),
+},
+ObjectMeta: metav1.ObjectMeta{
+Name: fmt.Sprintf("%s-%s-%s", operation.Name, taskdeploymentOrder, taskdeploymentName), // TODO: use GenerateName?
+Labels: labels,
+OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(operation, operation.GroupVersionKind())},
+},
+Spec: operatorv1.RuntimeTaskGroupSpec{
+Selector: metav1.LabelSelector{
+MatchLabels: labels,
+},
+Template: operatorv1.RuntimeTaskTemplateSpec{
+ObjectMeta: metav1.ObjectMeta{
+Labels: labels,
+CreationTimestamp: metav1.Now(),
+},
+Spec: operatorv1.RuntimeTaskSpec{
+Commands: []operatorv1.CommandDescriptor{},
+},
+},
+},
+Status: operatorv1.RuntimeTaskGroupStatus{
+Phase: string(operatorv1.OperationPhasePending),
+},
+}
+}
+
func createBasicTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder string, taskdeploymentName string) operatorv1.RuntimeTaskGroup {
gv := operatorv1.GroupVersion

Expand Down Expand Up @@ -64,18 +105,20 @@ func createBasicTaskGroup(operation *operatorv1.Operation, taskdeploymentOrder s
}
}

+// Kubernetes uses the control-plane node role label since v1.20.
+// To support older versions, manually label `master` nodes with the control-plane label.
func setCPSelector(t *operatorv1.RuntimeTaskGroup) {
t.Spec.NodeSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"node-role.kubernetes.io/master": "",
"node-role.kubernetes.io/control-plane": "",
},
}
}

func setCP1Selector(t *operatorv1.RuntimeTaskGroup) {
t.Spec.NodeSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"node-role.kubernetes.io/master": "",
"node-role.kubernetes.io/control-plane": "",
},
}
t.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterHead)
@@ -84,7 +127,7 @@ func setCP1Selector(t *operatorv1.RuntimeTaskGroup) {
func setCPNSelector(t *operatorv1.RuntimeTaskGroup) {
t.Spec.NodeSelector = metav1.LabelSelector{
MatchLabels: map[string]string{
"node-role.kubernetes.io/master": "",
"node-role.kubernetes.io/control-plane": "",
},
}
t.Spec.NodeFilter = string(operatorv1.RuntimeTaskGroupNodeFilterTail)
@@ -94,7 +137,7 @@ func setWSelector(t *operatorv1.RuntimeTaskGroup) {
t.Spec.NodeSelector = metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
-Key: "node-role.kubernetes.io/master",
+Key: "node-role.kubernetes.io/control-plane",
Operator: metav1.LabelSelectorOpDoesNotExist,
},
},
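
Note: to make the selector changes concrete, the self-contained snippet below (apimachinery only) checks that the worker selector built by setWSelector excludes nodes carrying the control-plane role label. On clusters older than v1.20, the label can be added by hand, e.g. kubectl label node NODE_NAME node-role.kubernetes.io/control-plane= (NODE_NAME is a placeholder):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The worker selector from setWSelector: match nodes WITHOUT the control-plane role.
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "node-role.kubernetes.io/control-plane",
			Operator: metav1.LabelSelectorOpDoesNotExist,
		}},
	}
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Matches(labels.Set{}))                                            // true: a worker node
	fmt.Println(s.Matches(labels.Set{"node-role.kubernetes.io/control-plane": ""})) // false: a control-plane node
}
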
