diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go
index 6e797ac17..81e849361 100644
--- a/pkg/controllers/clusterresourceplacement/controller.go
+++ b/pkg/controllers/clusterresourceplacement/controller.go
@@ -919,7 +919,7 @@ func (r *Reconciler) setResourcePlacementStatusAndResourceConditions(ctx context
 		scheduledCondition := metav1.Condition{
 			Status:             metav1.ConditionFalse,
 			Type:               string(fleetv1beta1.ResourceScheduledConditionType),
-			Reason:             "ScheduleFailed",
+			Reason:             ResourceScheduleFailedReason,
 			Message:            fmt.Sprintf(resourcePlacementConditionScheduleFailedMessageFormat, unselected[i].ClusterName, unselected[i].Reason),
 			ObservedGeneration: crp.Generation,
 		}
diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go
index ec2b03b69..e258d13e2 100644
--- a/pkg/controllers/clusterresourceplacement/placement_status.go
+++ b/pkg/controllers/clusterresourceplacement/placement_status.go
@@ -65,6 +65,8 @@ const (
 
 	// ResourceScheduleSucceededReason is the reason string of placement condition when the selected resources are scheduled.
 	ResourceScheduleSucceededReason = "ScheduleSucceeded"
+	// ResourceScheduleFailedReason is the reason string of placement condition when the scheduler failed to schedule the selected resources.
+	ResourceScheduleFailedReason = "ScheduleFailed"
 
 	// ResourcePlacementStatus schedule condition message formats
 	resourcePlacementConditionScheduleFailedMessageFormat = "%s is not selected: %s"
diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go
index b82acfd88..c79a180f0 100644
--- a/pkg/controllers/clusterresourceplacement/placement_status_test.go
+++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go
@@ -792,7 +792,7 @@ func TestSetPlacementStatus(t *testing.T) {
 					{
 						Status:             metav1.ConditionFalse,
 						Type:               string(fleetv1beta1.ResourceScheduledConditionType),
-						Reason:             "ScheduleFailed",
+						Reason:             ResourceScheduleFailedReason,
 						ObservedGeneration: crpGeneration,
 					},
 				},
@@ -802,7 +802,7 @@
 					{
 						Status:             metav1.ConditionFalse,
 						Type:               string(fleetv1beta1.ResourceScheduledConditionType),
-						Reason:             "ScheduleFailed",
+						Reason:             ResourceScheduleFailedReason,
 						ObservedGeneration: crpGeneration,
 					},
 				},
diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go
index 56e92e18c..5ba9eb8ec 100644
--- a/test/e2e/actuals_test.go
+++ b/test/e2e/actuals_test.go
@@ -111,16 +111,19 @@ func crpRolloutFailedConditions(generation int64) []metav1.Condition {
 			Type:               string(placementv1beta1.ClusterResourcePlacementScheduledConditionType),
 			Status:             metav1.ConditionFalse,
 			ObservedGeneration: generation,
+			Reason:             scheduler.NotFullyScheduledReason,
 		},
 		{
 			Type:               string(placementv1beta1.ClusterResourcePlacementSynchronizedConditionType),
 			Status:             metav1.ConditionTrue,
 			ObservedGeneration: generation,
+			Reason:             clusterresourceplacement.SynchronizeSucceededReason,
 		},
 		{
 			Type:               string(placementv1beta1.ClusterResourcePlacementAppliedConditionType),
 			Status:             metav1.ConditionTrue,
 			ObservedGeneration: generation,
+			Reason:             clusterresourceplacement.ApplySucceededReason,
 		},
 	}
 }
@@ -223,6 +226,7 @@ func resourcePlacementRolloutFailedConditions(generation int64) []metav1.Conditi
 			Type:               string(placementv1beta1.ResourceScheduledConditionType),
 			Status:             metav1.ConditionFalse,
 			ObservedGeneration: generation,
+			Reason:             clusterresourceplacement.ResourceScheduleFailedReason,
 		},
 	}
 }
@@ -258,17 +262,16 @@ func crpStatusUpdatedActual(
 		return err
 	}
 
-	var wantPlacementStatus []placementv1beta1.ResourcePlacementStatus
+	wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{}
 	for _, name := range wantSelectedClusters {
 		wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
 			ClusterName: name,
 			Conditions:  resourcePlacementRolloutCompletedConditions(crp.Generation),
 		})
 	}
-	for _, name := range wantUnselectedClusters {
+	for i := 0; i < len(wantUnselectedClusters); i++ {
 		wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
-			ClusterName: name,
-			Conditions:  resourcePlacementRolloutFailedConditions(crp.Generation),
+			Conditions: resourcePlacementRolloutFailedConditions(crp.Generation),
 		})
 	}
 
diff --git a/test/e2e/placement_pickfixed_test.go b/test/e2e/placement_pickfixed_test.go
new file mode 100644
index 000000000..899041396
--- /dev/null
+++ b/test/e2e/placement_pickfixed_test.go
@@ -0,0 +1,179 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package e2e
+
+import (
+	"fmt"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/pointer"
+
+	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/test/e2e/framework"
+)
+
+var _ = Describe("placing resources using a CRP of PickFixed placement type", func() {
+	Context("pick some clusters", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+
+		BeforeAll(func() {
+			// Create the resources.
+			createWorkResources()
+
+			// Create the CRP.
+			crp := &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: crpName,
+					// Add a custom finalizer; this would allow us to better observe
+					// the behavior of the controllers.
+					Finalizers: []string{customDeletionBlockerFinalizer},
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: workResourceSelector(),
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+						RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+							UnavailablePeriodSeconds: pointer.Int(2),
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+						ClusterNames: []string{
+							memberCluster1Name,
+						},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+		})
+
+		It("should place resources on specified clusters", func() {
+			resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1)
+			Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster1Name}, nil)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		AfterAll(func() {
+			ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster1})
+		})
+	})
+
+	Context("refreshing target clusters", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+
+		BeforeAll(func() {
+			// Create the resources.
+			createWorkResources()
+
+			// Create the CRP.
+			crp := &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: crpName,
+					// Add a custom finalizer; this would allow us to better observe
+					// the behavior of the controllers.
+					Finalizers: []string{customDeletionBlockerFinalizer},
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: workResourceSelector(),
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+						RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+							UnavailablePeriodSeconds: pointer.Int(2),
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+						ClusterNames: []string{
+							memberCluster1Name,
+						},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+
+			// Verify that resources are placed on specified clusters.
+			resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1)
+			Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
+
+			// Update the CRP to pick a different cluster.
+			Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp)).To(Succeed(), "Failed to get CRP")
+			crp.Spec.Policy.ClusterNames = []string{memberCluster2Name}
+			Expect(hubClient.Update(ctx, crp)).To(Succeed(), "Failed to update CRP")
+		})
+
+		It("should place resources on newly specified clusters", func() {
+			resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2)
+			Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
+		})
+
+		It("should remove resources from previously specified clusters", func() {
+			resourceRemovedActual := workNamespaceRemovedFromClusterActual(memberCluster1)
+			Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove resources from previously specified clusters")
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2Name}, nil)
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		AfterAll(func() {
+			ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster2})
+		})
+	})
+
+	Context("pick unhealthy and non-existent clusters", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+
+		BeforeAll(func() {
+			// Create the resources.
+			createWorkResources()
+
+			// Create the CRP.
+			crp := &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: crpName,
+					// Add a custom finalizer; this would allow us to better observe
+					// the behavior of the controllers.
+					Finalizers: []string{customDeletionBlockerFinalizer},
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: workResourceSelector(),
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+						RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+							UnavailablePeriodSeconds: pointer.Int(2),
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+						ClusterNames: []string{
+							memberCluster4Name,
+							memberCluster5Name,
+							memberCluster6Name,
+						},
+					},
+				},
+			}
+			Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
+		})
+
+		It("should update CRP status as expected", func() {
+			crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), nil, []string{memberCluster4Name, memberCluster5Name, memberCluster6Name})
+			Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
+		})
+
+		AfterAll(func() {
+			ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{})
+		})
+	})
+})