Skip to content

Commit

Permalink
check existence of the same label from node-agent
Browse files Browse the repository at this point in the history
Signed-off-by: Lyndon-Li <[email protected]>
  • Loading branch information
Lyndon-Li committed Dec 9, 2024
1 parent d914702 commit 04cc78d
Show file tree
Hide file tree
Showing 9 changed files with 191 additions and 19 deletions.
2 changes: 1 addition & 1 deletion changelogs/unreleased/8487-Lyndon-Li
Original file line number Diff line number Diff line change
@@ -1 +1 @@
Fix issue #8433, add aks label to data mover pods
Fix issue #8433, add third party labels to data mover pods when the same labels exist in node-agent pods
9 changes: 9 additions & 0 deletions pkg/controller/data_download_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,9 @@ import (
"github.com/vmware-tanzu/velero/pkg/datapath"
"github.com/vmware-tanzu/velero/pkg/exposer"
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

Expand Down Expand Up @@ -178,6 +180,13 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
}

hostingPodLabels := map[string]string{velerov1api.DataDownloadLabel: dd.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(ctx, r.kubeClient, dd.Namespace, k); err != nil {
log.WithError(err).Warnf("Failed to check existence of label from node-agent, skip adding label %s", k)
} else {
hostingPodLabels[k] = v
}

Check warning on line 188 in pkg/controller/data_download_controller.go

View check run for this annotation

Codecov / codecov/patch

pkg/controller/data_download_controller.go#L187-L188

Added lines #L187 - L188 were not covered by tests
}

// Expose() will trigger to create one pod whose volume is restored by a given volume snapshot,
// but the pod maybe is not in the same node of the current controller, so we need to return it here.
Expand Down
12 changes: 11 additions & 1 deletion pkg/controller/data_upload_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/metrics"
"github.com/vmware-tanzu/velero/pkg/nodeagent"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

Expand Down Expand Up @@ -806,11 +807,20 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload
accessMode = exposer.AccessModeBlock
}

hostingPodLabels := map[string]string{velerov1api.DataUploadLabel: du.Name}
for _, k := range util.ThirdPartyLabels {
if v, err := nodeagent.GetLabelValue(context.Background(), r.kubeClient, du.Namespace, k); err != nil {

Check warning on line 812 in pkg/controller/data_upload_controller.go

View check run for this annotation

Codecov / codecov/patch

pkg/controller/data_upload_controller.go#L811-L812

Added lines #L811 - L812 were not covered by tests
r.logger.WithError(err).Warnf("Failed to check existence of label from node-agent, skip adding label %s", k)
} else {
hostingPodLabels[k] = v
}
}

return &exposer.CSISnapshotExposeParam{
SnapshotName: du.Spec.CSISnapshot.VolumeSnapshot,
SourceNamespace: du.Spec.SourceNamespace,
StorageClass: du.Spec.CSISnapshot.StorageClass,
HostingPodLabels: map[string]string{velerov1api.DataUploadLabel: du.Name},
HostingPodLabels: hostingPodLabels,
AccessMode: accessMode,
OperationTimeout: du.Spec.OperationTimeout.Duration,
ExposeTimeout: r.preparingTimeout,
Expand Down
4 changes: 0 additions & 4 deletions pkg/exposer/csi_snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -488,10 +488,6 @@ func (e *csiSnapshotExposer) createBackupPod(
}
label[podGroupLabel] = podGroupSnapshot

for k, v := range thirdPartyLabels {
label[k] = v
}

volumeMode := corev1.PersistentVolumeFilesystem
if backupPVC.Spec.VolumeMode != nil {
volumeMode = *backupPVC.Spec.VolumeMode
Expand Down
8 changes: 0 additions & 8 deletions pkg/exposer/generic_restore.go
Original file line number Diff line number Diff line change
Expand Up @@ -323,14 +323,6 @@ func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObjec
}}
volumes = append(volumes, podInfo.volumes...)

if label == nil {
label = make(map[string]string)
}

for k, v := range thirdPartyLabels {
label[k] = v
}

volumeMode := corev1.PersistentVolumeFilesystem
if targetPVC.Spec.VolumeMode != nil {
volumeMode = *targetPVC.Spec.VolumeMode
Expand Down
4 changes: 0 additions & 4 deletions pkg/exposer/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,3 @@ type ExposeByPod struct {
HostingContainer string
VolumeName string
}

var thirdPartyLabels map[string]string = map[string]string{
"azure.workload.identity/use": "true",
}
21 changes: 20 additions & 1 deletion pkg/nodeagent/node_agent.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ const (
)

var (
	// ErrDaemonSetNotFound is returned when the node-agent daemonset does not
	// exist in the queried namespace.
	ErrDaemonSetNotFound = errors.New("daemonset not found")
	// ErrNodeAgentLabelNotFound is returned by GetLabelValue when the requested
	// label key is not set on the node-agent daemonset's pod template.
	ErrNodeAgentLabelNotFound = errors.New("node-agent label not found")
)

type LoadConcurrency struct {
Expand Down Expand Up @@ -161,3 +162,21 @@ func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Int

return configs, nil
}

// GetLabelValue looks up the pod template label with the given key on the
// node-agent daemonset in the given namespace. It returns the label's value
// (which may be empty), or ErrNodeAgentLabelNotFound when the label is not
// set on the template, or a wrapped error when the daemonset cannot be fetched.
func GetLabelValue(ctx context.Context, kubeClient kubernetes.Interface, namespace string, key string) (string, error) {
	nodeAgent, getErr := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{})
	if getErr != nil {
		return "", errors.Wrap(getErr, "error getting node-agent daemonset")
	}

	templateLabels := nodeAgent.Spec.Template.Labels
	if templateLabels == nil {
		return "", ErrNodeAgentLabelNotFound
	}

	if value, exists := templateLabels[key]; exists {
		return value, nil
	}

	return "", ErrNodeAgentLabelNotFound
}
129 changes: 129 additions & 0 deletions pkg/nodeagent/node_agent_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -331,3 +331,132 @@ func TestGetConfigs(t *testing.T) {
})
}
}

// TestGetLabelValue verifies that GetLabelValue returns the node-agent pod
// template label value when present (including an empty value), and the
// appropriate error when the daemonset or the label is missing.
func TestGetLabelValue(t *testing.T) {
	// buildDaemonSet creates a fake node-agent daemonset whose pod template
	// carries the given labels (nil means no template labels at all).
	buildDaemonSet := func(templateLabels map[string]string) *appsv1.DaemonSet {
		return &appsv1.DaemonSet{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "fake-ns",
				Name:      "node-agent",
			},
			TypeMeta: metav1.TypeMeta{
				Kind: "DaemonSet",
			},
			Spec: appsv1.DaemonSetSpec{
				Template: corev1.PodTemplateSpec{
					ObjectMeta: metav1.ObjectMeta{
						Labels: templateLabels,
					},
				},
			},
		}
	}

	testCases := []struct {
		name          string
		kubeClientObj []runtime.Object
		namespace     string
		expectedValue string
		expectErr     string
	}{
		{
			name:      "ds get error",
			namespace: "fake-ns",
			expectErr: "error getting node-agent daemonset: daemonsets.apps \"node-agent\" not found",
		},
		{
			name:          "no label",
			namespace:     "fake-ns",
			kubeClientObj: []runtime.Object{buildDaemonSet(nil)},
			expectErr:     ErrNodeAgentLabelNotFound.Error(),
		},
		{
			name:          "no expecting label",
			namespace:     "fake-ns",
			kubeClientObj: []runtime.Object{buildDaemonSet(map[string]string{"fake-other-label": "fake-value-1"})},
			expectErr:     ErrNodeAgentLabelNotFound.Error(),
		},
		{
			name:          "expecting label",
			namespace:     "fake-ns",
			kubeClientObj: []runtime.Object{buildDaemonSet(map[string]string{"fake-label": "fake-value-2"})},
			expectedValue: "fake-value-2",
		},
		{
			name:          "expecting empty label",
			namespace:     "fake-ns",
			kubeClientObj: []runtime.Object{buildDaemonSet(map[string]string{"fake-label": ""})},
			expectedValue: "",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			client := fake.NewSimpleClientset(tc.kubeClientObj...)

			got, err := GetLabelValue(context.TODO(), client, tc.namespace, "fake-label")
			if tc.expectErr != "" {
				assert.EqualError(t, err, tc.expectErr)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tc.expectedValue, got)
		})
	}
}
21 changes: 21 additions & 0 deletions pkg/util/third_party.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

// ThirdPartyLabels lists the label keys of known third party components
// (currently only the Azure workload identity webhook) that, when present on
// the node-agent pods, are propagated to the data mover pods as well.
// The redundant explicit type in the original declaration is dropped per
// idiomatic Go (the element type is inferred from the composite literal).
var ThirdPartyLabels = []string{
	"azure.workload.identity/use",
}

0 comments on commit 04cc78d

Please sign in to comment.