diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go index 5aaeea6af..22d6d70d6 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -915,6 +915,7 @@ func (r *Reconciler) setResourcePlacementStatusAndResourceConditions(ctx context // TODO: we could improve the message by summarizing the failure reasons from all of the unselected clusters. // For now, it starts from adding some sample failures of unselected clusters. var rp fleetv1beta1.ResourcePlacementStatus + rp.ClusterName = unselected[i].ClusterName scheduledCondition := metav1.Condition{ Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceScheduledConditionType), diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index 09b63ff6d..cce3bd115 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -780,6 +780,7 @@ func TestSetPlacementStatus(t *testing.T) { FailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{}, }, { + ClusterName: "member-2", Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, @@ -790,6 +791,7 @@ func TestSetPlacementStatus(t *testing.T) { }, }, { + ClusterName: "member-3", Conditions: []metav1.Condition{ { Status: metav1.ConditionFalse, diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go index 5a68d9835..6f3b1e9cc 100644 --- a/test/e2e/actuals_test.go +++ b/test/e2e/actuals_test.go @@ -60,7 +60,7 @@ func validateConfigMapOnCluster(cluster *framework.Cluster, name types.Namespace ignoreObjectMetaAutoGeneratedFields, ignoreObjectMetaAnnotationField, ); diff != "" { - return fmt.Errorf("app deployment diff (-got, +want): %s", diff) + return fmt.Errorf("app config map diff (-got, +want): %s", diff) } return nil @@ -79,6 +79,26 @@ func workNamespaceAndConfigMapPlacedOnClusterActual(cluster *framework.Cluster) } } +func crpRolloutFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ + { + Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementSynchronizedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + }, + { + Type: string(placementv1beta1.ClusterResourcePlacementAppliedConditionType), + Status: metav1.ConditionTrue, + ObservedGeneration: generation, + }, + } +} + func crpRolloutCompletedConditions(generation int64) []metav1.Condition { return []metav1.Condition{ { @@ -119,58 +139,79 @@ func resourcePlacementRolloutCompletedConditions(generation int64) []metav1.Cond } } -func validateCRPStatus(name types.NamespacedName, wantSelectedResources []placementv1beta1.ResourceIdentifier) error { - crp := &placementv1beta1.ClusterResourcePlacement{} - if err := hubClient.Get(ctx, name, crp); err != nil { - return err - } - - wantCRPConditions := crpRolloutCompletedConditions(crp.Generation) - wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{ +func resourcePlacementRolloutFailedConditions(generation int64) []metav1.Condition { + return []metav1.Condition{ { - ClusterName: memberCluster1Name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation), + Type: 
string(placementv1beta1.ResourceScheduledConditionType), + Status: metav1.ConditionFalse, + ObservedGeneration: generation, }, + } +} + +func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier { + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) + + return []placementv1beta1.ResourceIdentifier{ { - ClusterName: memberCluster2Name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation), + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", }, { - ClusterName: memberCluster3Name, - Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation), + Kind: "ConfigMap", + Name: appConfigMapName, + Version: "v1", + Namespace: workNamespaceName, }, } - wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ - Conditions: wantCRPConditions, - PlacementStatuses: wantPlacementStatus, - SelectedResources: wantSelectedResources, - } - if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { - return fmt.Errorf("CRP status diff (-got, +want): %s", diff) - } - return nil } -func crpStatusUpdatedActual() func() error { +func crpStatusUpdatedActual( + wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier, + wantSelectedClusters, wantUnselectedClusters []string, +) func() error { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) return func() error { - wantSelectedResources := []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: workNamespaceName, - Version: "v1", - }, - { - Kind: "ConfigMap", - Name: appConfigMapName, - Version: "v1", - Namespace: workNamespaceName, - }, + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{} + for _, name := range wantSelectedClusters { + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation), + }) + } + for _, name := range wantUnselectedClusters { + wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{ + ClusterName: name, + Conditions: resourcePlacementRolloutFailedConditions(crp.Generation), + }) } - return validateCRPStatus(types.NamespacedName{Name: crpName}, wantSelectedResources) + + wantCRPConditions := crpRolloutCompletedConditions(crp.Generation) + if len(wantUnselectedClusters) > 0 { + wantCRPConditions = crpRolloutFailedConditions(crp.Generation) + } + + // Note that the CRP controller will only keep decisions regarding unselected clusters for a CRP if: + // + // * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or + // * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled.
+ wantStatus := placementv1beta1.ClusterResourcePlacementStatus{ + Conditions: wantCRPConditions, + PlacementStatuses: wantPlacementStatus, + SelectedResources: wantSelectedResourceIdentifiers, + } + if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil } } diff --git a/test/e2e/placement_pickall_test.go b/test/e2e/placement_pickall_test.go new file mode 100644 index 000000000..3cbc8b6f2 --- /dev/null +++ b/test/e2e/placement_pickall_test.go @@ -0,0 +1,57 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" +) + +var _ = Describe("placing resources using a CRP with no placement policy specified", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the resources. + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: pointer.Int(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterAll(func() { + ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters) + }) +}) diff --git a/test/e2e/placement_pickfixed_test.go b/test/e2e/placement_pickfixed_test.go new file mode 100644 index 000000000..899041396 --- /dev/null +++ b/test/e2e/placement_pickfixed_test.go @@ -0,0 +1,179 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package e2e + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/test/e2e/framework" +) + +var _ = Describe("placing resources using a CRP of PickFixed placement type", func() { + Context("pick some clusters", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the resources. + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: pointer.Int(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1Name, + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should place resources on specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster1Name}, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterAll(func() { + ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster1}) + }) + }) + + Context("refreshing target clusters", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the resources. + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: pointer.Int(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1Name, + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + + // Verify that resources are placed on specified clusters. + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + + // Update the CRP to pick a different cluster. 
+ Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp)).To(Succeed(), "Failed to get CRP") + crp.Spec.Policy.ClusterNames = []string{memberCluster2Name} + Expect(hubClient.Update(ctx, crp)).To(Succeed(), "Failed to update CRP") + }) + + It("should place resources on newly specified clusters", func() { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters") + }) + + It("should remove resources from previously specified clusters", func() { + resourceRemovedActual := workNamespaceRemovedFromClusterActual(memberCluster1) + Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove resources from previously specified clusters") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2Name}, nil) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterAll(func() { + ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster2}) + }) + }) + + Context("pick unhealthy and non-existent clusters", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Create the resources. + createWorkResources() + + // Create the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: workResourceSelector(), + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: pointer.Int(2), + }, + }, + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster4Name, + memberCluster5Name, + memberCluster6Name, + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), nil, []string{memberCluster4Name, memberCluster5Name, memberCluster6Name}) + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + }) + + AfterAll(func() { + ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{}) + }) + }) +}) diff --git a/test/e2e/placement_selecting_resources_test.go b/test/e2e/placement_selecting_resources_test.go index cdf9e89c7..96e414ea3 100644 --- a/test/e2e/placement_selecting_resources_test.go +++ b/test/e2e/placement_selecting_resources_test.go @@ -52,7 +52,7 @@ var _ = Describe("creating CRP and selecting resources by name", Ordered, func() }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to 
update CRP %s status as expected", crpName) }) @@ -119,7 +119,7 @@ var _ = Describe("creating CRP and selecting resources by label", Ordered, func( }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -191,9 +191,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := func() error { - return validateCRPStatus(types.NamespacedName{Name: crpName}, nil) - } + crpStatusUpdatedActual := crpStatusUpdatedActual([]placementv1beta1.ResourceIdentifier{}, allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -210,7 +208,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become selected a }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -282,7 +280,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -299,9 +297,7 @@ var _ = Describe("validating CRP when cluster-scoped resources become unselected }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := func() error { - return validateCRPStatus(types.NamespacedName{Name: crpName}, nil) - } + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -360,7 +356,7 @@ var _ = Describe("validating CRP when cluster-scoped and namespace-scoped resour }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -386,7 +382,7 @@ var _ = Describe("validating CRP when cluster-scoped and namespace-scoped resour It("should update the selected resources on member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) 
@@ -444,16 +440,14 @@ var _ = Describe("validating CRP when adding resources in a matching namespace", }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := func() error { - wantSelectedResources := []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), - Version: "v1", - }, - } - return validateCRPStatus(types.NamespacedName{Name: crpName}, wantSelectedResources) + wantSelectedResourceIdentifiers := []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + Version: "v1", + }, } + crpStatusUpdatedActual := crpStatusUpdatedActual(wantSelectedResourceIdentifiers, allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -475,7 +469,7 @@ var _ = Describe("validating CRP when adding resources in a matching namespace", }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -534,7 +528,7 @@ var _ = Describe("validating CRP when deleting resources in a matching namespace }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) @@ -546,16 +540,14 @@ var _ = Describe("validating CRP when deleting resources in a matching namespace }) It("should update CRP status as expected", func() { - crpStatusUpdatedActual := func() error { - wantSelectedResources := []placementv1beta1.ResourceIdentifier{ - { - Kind: "Namespace", - Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), - Version: "v1", - }, - } - return validateCRPStatus(types.NamespacedName{Name: crpName}, wantSelectedResources) + wantSelectedResourceIdentifiers := []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()), + Version: "v1", + }, } + crpStatusUpdatedActual := crpStatusUpdatedActual(wantSelectedResourceIdentifiers, allMemberClusterNames, nil) Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP %s status as expected", crpName) }) diff --git a/test/e2e/placement_test.go b/test/e2e/placement_test.go deleted file mode 100644 index a23da2eab..000000000 --- a/test/e2e/placement_test.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright (c) Microsoft Corporation. -Licensed under the MIT license. -*/ - -package e2e - -import ( - "fmt" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" - - placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" -) - -// Note that this container will run in parallel with other containers. 
-var _ = Describe("placing resources using a CRP with no placement policy specified", Ordered, func() { - crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) - workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess()) - - BeforeAll(func() { - // Create the resources. - createWorkResources() - - // Create the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - // Add a custom finalizer; this would allow us to better observe - // the behavior of the controllers. - Finalizers: []string{customDeletionBlockerFinalizer}, - }, - Spec: placementv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: workResourceSelector(), - Strategy: placementv1beta1.RolloutStrategy{ - Type: placementv1beta1.RollingUpdateRolloutStrategyType, - RollingUpdate: &placementv1beta1.RollingUpdateConfig{ - UnavailablePeriodSeconds: pointer.Int(2), - }, - }, - }, - } - Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") - }) - - It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) - - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - - It("can update the resource", func() { - // Get the config map. - configMap := &corev1.ConfigMap{} - Expect(hubClient.Get(ctx, types.NamespacedName{Namespace: workNamespaceName, Name: appConfigMapName}, configMap)).To(Succeed(), "Failed to get config map") - - configMap.Data = map[string]string{ - "data": "updated", - } - Expect(hubClient.Update(ctx, configMap)).To(Succeed(), "Failed to update config map") - }) - - It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters) - - It("should update CRP status as expected", func() { - crpStatusUpdatedActual := crpStatusUpdatedActual() - Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") - }) - - It("can delete the CRP", func() { - // Delete the CRP. - crp := &placementv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: crpName, - }, - } - Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") - }) - - It("should remove placed resources from all member clusters", checkIfRemovedWorkResourcesFromAllMemberClusters) - - It("should remove controller finalizers from CRP", func() { - finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual() - Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") - }) - - AfterAll(func() { - // Remove the custom deletion blocker finalizer from the CRP. - cleanupCRP(crpName) - - // Delete the created resources. 
- cleanupWorkResources() -}) diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index 93d773a1b..e731d87da 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -10,6 +10,7 @@ import ( "fmt" "log" "os" + "sync" "testing" "time" @@ -39,6 +40,9 @@ const ( memberCluster1Name = "kind-cluster-1" memberCluster2Name = "kind-cluster-2" memberCluster3Name = "kind-cluster-3" + memberCluster4Name = "kind-unhealthy-cluster" + memberCluster5Name = "kind-left-cluster" + memberCluster6Name = "kind-non-existent-cluster" hubClusterSAName = "hub-agent-sa" fleetSystemNS = "fleet-system" @@ -47,13 +51,14 @@ const ( - eventuallyDuration = time.Minute * 10 + eventuallyDuration = time.Second * 40 eventuallyInterval = time.Second * 5 ) var ( ctx = context.Background() scheme = runtime.NewScheme() + once = sync.Once{} hubCluster *framework.Cluster memberCluster1 *framework.Cluster @@ -65,7 +70,32 @@ var ( memberCluster2Client client.Client memberCluster3Client client.Client - allMemberClusters []*framework.Cluster + allMemberClusters []*framework.Cluster + allMemberClusterNames = []string{} +) + +var ( + regionLabelName = "region" + regionLabelValue1 = "east" + regionLabelValue2 = "west" + envLabelName = "env" + envLabelValue1 = "prod" + envLabelValue2 = "canary" + + labelsByClusterName = map[string]map[string]string{ + memberCluster1Name: { + regionLabelName: regionLabelValue1, + envLabelName: envLabelValue1, + }, + memberCluster2Name: { + regionLabelName: regionLabelValue1, + envLabelName: envLabelValue2, + }, + memberCluster3Name: { + regionLabelName: regionLabelValue2, + envLabelName: envLabelValue1, + }, + } ) var ( @@ -95,6 +125,7 @@ var ( cmpopts.SortSlices(lessFuncPlacementStatus), cmpopts.SortSlices(lessFuncResourceIdentifier), ignoreConditionLTTReasonAndMessageFields, + cmpopts.EquateEmpty(), } ) @@ -157,6 +188,13 @@ func beforeSuiteForAllProcesses() { Expect(memberCluster3Client).NotTo(BeNil(), "Failed to initialize client for accessing kubernetes cluster") allMemberClusters = []*framework.Cluster{memberCluster1, memberCluster2, memberCluster3} + once.Do(func() { + // Set these arrays only once; this is necessary as for the first spawned Ginkgo process, + // the `beforeSuiteForAllProcesses` function is called twice. + for _, cluster := range allMemberClusters { + allMemberClusterNames = append(allMemberClusterNames, cluster.ClusterName) + } + }) } func beforeSuiteForProcess1() { @@ -164,6 +202,11 @@ func beforeSuiteForProcess1() { setAllMemberClustersToJoin() checkIfAllMemberClustersHaveJoined() + + // Simulate that member cluster 4 has become unhealthy, and member cluster 5 has left the fleet. + // + // Note that these clusters are not real kind clusters.
+ setupInvalidClusters() } var _ = SynchronizedBeforeSuite(beforeSuiteForProcess1, beforeSuiteForAllProcesses) @@ -171,4 +214,6 @@ var _ = SynchronizedBeforeSuite(beforeSuiteForProcess1, beforeSuiteForAllProcess var _ = SynchronizedAfterSuite(func() {}, func() { setAllMemberClustersToLeave() checkIfAllMemberClustersHaveLeft() + + cleanupInvalidClusters() }) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 75fb52eb7..5f51b3006 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -7,6 +7,7 @@ package e2e import ( "fmt" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -19,6 +20,7 @@ import ( clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + "go.goms.io/fleet/test/e2e/framework" testutils "go.goms.io/fleet/test/e2e/v1alpha1/utils" ) @@ -29,7 +31,8 @@ func setAllMemberClustersToJoin() { mcObj := &clusterv1beta1.MemberCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: memberCluster.ClusterName, + Name: memberCluster.ClusterName, + Labels: labelsByClusterName[memberCluster.ClusterName], }, Spec: clusterv1beta1.MemberClusterSpec{ Identity: rbacv1.Subject{ @@ -87,6 +90,101 @@ func checkIfAllMemberClustersHaveJoined() { } } +// setupInvalidClusters simulates the case where some clusters in the fleet become unhealthy or +// have left the fleet. +func setupInvalidClusters() { + // Create a member cluster object that represents the unhealthy cluster. + mcObj := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: memberCluster4Name, + }, + Spec: clusterv1beta1.MemberClusterSpec{ + Identity: rbacv1.Subject{ + Name: hubClusterSAName, + Kind: "ServiceAccount", + Namespace: fleetSystemNS, + }, + }, + } + Expect(hubClient.Create(ctx, mcObj)).To(Succeed(), "Failed to create member cluster object") + + // Mark the member cluster as unhealthy. + + // Use an Eventually block to avoid flakiness and conflicts, as the hub agent will attempt + // to reconcile this object at the same time. + Eventually(func() error { + memberCluster := clusterv1beta1.MemberCluster{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: memberCluster4Name}, &memberCluster); err != nil { + return err + } + + memberCluster.Status = clusterv1beta1.MemberClusterStatus{ + AgentStatus: []clusterv1beta1.AgentStatus{ + { + Type: clusterv1beta1.MemberAgent, + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.AgentJoined), + LastTransitionTime: metav1.Now(), + ObservedGeneration: 0, + Status: metav1.ConditionTrue, + Reason: "UnhealthyCluster", + Message: "set to be unhealthy", + }, + }, + LastReceivedHeartbeat: metav1.NewTime(time.Now().Add(time.Minute * (-20))), + }, + }, + } + + return hubClient.Status().Update(ctx, &memberCluster) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update member cluster status") + + // Create a member cluster object that represents the cluster that has left the fleet. + // + // Note that we use a custom finalizer to block the member cluster's deletion.
+ mcObj = &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: memberCluster5Name, + Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: clusterv1beta1.MemberClusterSpec{ + Identity: rbacv1.Subject{ + Name: hubClusterSAName, + Kind: "ServiceAccount", + Namespace: fleetSystemNS, + }, + }, + } + Expect(hubClient.Create(ctx, mcObj)).To(Succeed(), "Failed to create member cluster object") + Expect(hubClient.Delete(ctx, mcObj)).To(Succeed(), "Failed to delete member cluster object") +} + +func cleanupInvalidClusters() { + invalidClusterNames := []string{memberCluster4Name, memberCluster5Name} + for _, name := range invalidClusterNames { + mcObj := &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + Expect(hubClient.Delete(ctx, mcObj)).To(Succeed(), "Failed to delete member cluster object") + + Expect(hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj)).To(Succeed(), "Failed to get member cluster object") + mcObj.Finalizers = []string{} + Expect(hubClient.Update(ctx, mcObj)).To(Succeed(), "Failed to update member cluster object") + + Eventually(func() error { + mcObj := &clusterv1beta1.MemberCluster{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: name}, mcObj); !errors.IsNotFound(err) { + return fmt.Errorf("member cluster still exists or an unexpected error occurred: %w", err) + } + + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to wait for member cluster object to be removed") + } +} + // createWorkResources creates some resources on the hub cluster for testing purposes. func createWorkResources() { ns := workNamespace() @@ -176,3 +274,31 @@ func cleanupCRP(name string) { removedActual := crpRemovedActual() Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove CRP %s", name) } + +func ensureCRPAndRelatedResourcesDeletion(crpName string, memberClusters []*framework.Cluster) { + // Delete the CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + }, + } + Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP") + + // Verify that all resources placed have been removed from specified member clusters. + for idx := range memberClusters { + memberCluster := memberClusters[idx] + + workResourcesRemovedActual := workNamespaceRemovedFromClusterActual(memberCluster) + Eventually(workResourcesRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove work resources from member cluster %s", memberCluster.ClusterName) + } + + // Verify that related finalizers have been removed from the CRP. + finalizerRemovedActual := allFinalizersExceptForCustomDeletionBlockerRemovedFromCRPActual() + Eventually(finalizerRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove controller finalizers from CRP") + + // Remove the custom deletion blocker finalizer from the CRP. + cleanupCRP(crpName) + + // Delete the created resources. + cleanupWorkResources() +}