
test: add v1beta1 E2E test cases (3/) #559

Merged (3 commits) on Oct 23, 2023
2 changes: 1 addition & 1 deletion pkg/controllers/clusterresourceplacement/controller.go
@@ -919,7 +919,7 @@ func (r *Reconciler) setResourcePlacementStatusAndResourceConditions(ctx context
scheduledCondition := metav1.Condition{
Status: metav1.ConditionFalse,
Type: string(fleetv1beta1.ResourceScheduledConditionType),
- Reason: "ScheduleFailed",
+ Reason: ResourceScheduleFailedReason,
Message: fmt.Sprintf(resourcePlacementConditionScheduleFailedMessageFormat, unselected[i].ClusterName, unselected[i].Reason),
ObservedGeneration: crp.Generation,
}
2 changes: 2 additions & 0 deletions pkg/controllers/clusterresourceplacement/placement_status.go
@@ -65,6 +65,8 @@ const (

// ResourceScheduleSucceededReason is the reason string of placement condition when the selected resources are scheduled.
ResourceScheduleSucceededReason = "ScheduleSucceeded"
+ // ResourceScheduleFailedReason is the reason string of placement condition when the scheduler failed to schedule the selected resources.
+ ResourceScheduleFailedReason = "ScheduleFailed"

// ResourcePlacementStatus schedule condition message formats
resourcePlacementConditionScheduleFailedMessageFormat = "%s is not selected: %s"
@@ -792,7 +792,7 @@ func TestSetPlacementStatus(t *testing.T) {
{
Status: metav1.ConditionFalse,
Type: string(fleetv1beta1.ResourceScheduledConditionType),
- Reason: "ScheduleFailed",
+ Reason: ResourceScheduleFailedReason,
ObservedGeneration: crpGeneration,
},
},
@@ -802,7 +802,7 @@ {
{
Status: metav1.ConditionFalse,
Type: string(fleetv1beta1.ResourceScheduledConditionType),
- Reason: "ScheduleFailed",
+ Reason: ResourceScheduleFailedReason,
ObservedGeneration: crpGeneration,
},
},
11 changes: 7 additions & 4 deletions test/e2e/actuals_test.go
@@ -111,16 +111,19 @@ func crpRolloutFailedConditions(generation int64) []metav1.Condition {
Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType),
Status: metav1.ConditionFalse,
ObservedGeneration: generation,
+ Reason: scheduler.NotFullyScheduledReason,
},
{
Type: string(placementv1beta1.ClusterResourcePlacementSynchronizedConditionType),
Status: metav1.ConditionTrue,
ObservedGeneration: generation,
+ Reason: clusterresourceplacement.SynchronizeSucceededReason,
},
{
Type: string(placementv1beta1.ClusterResourcePlacementAppliedConditionType),
Status: metav1.ConditionTrue,
ObservedGeneration: generation,
+ Reason: clusterresourceplacement.ApplySucceededReason,
},
}
}
@@ -223,6 +226,7 @@ func resourcePlacementRolloutFailedConditions(generation int64) []metav1.Condition {
Type: string(placementv1beta1.ResourceScheduledConditionType),
Status: metav1.ConditionFalse,
ObservedGeneration: generation,
+ Reason: clusterresourceplacement.ResourceScheduleFailedReason,
},
}
}
@@ -258,17 +262,16 @@ func crpStatusUpdatedActual(
return err
}

- var wantPlacementStatus []placementv1beta1.ResourcePlacementStatus
+ wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{}
for _, name := range wantSelectedClusters {
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
ClusterName: name,
Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation),
})
}
- for _, name := range wantUnselectedClusters {
+ for i := 0; i < len(wantUnselectedClusters); i++ {
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
- ClusterName: name,
- Conditions: resourcePlacementRolloutFailedConditions(crp.Generation),
+ Conditions: resourcePlacementRolloutFailedConditions(crp.Generation),
})
}

179 changes: 179 additions & 0 deletions test/e2e/placement_pickfixed_test.go
@@ -0,0 +1,179 @@
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
*/

package e2e

import (
"fmt"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"

placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
"go.goms.io/fleet/test/e2e/framework"
)

var _ = Describe("placing resources using a CRP of PickFixed placement type", func() {
Context("pick some clusters", Ordered, func() {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())

BeforeAll(func() {
// Create the resources.
createWorkResources()

// Create the CRP.
crp := &placementv1beta1.ClusterResourcePlacement{
ObjectMeta: metav1.ObjectMeta{
Name: crpName,
// Add a custom finalizer; this would allow us to better observe
// the behavior of the controllers.
Finalizers: []string{customDeletionBlockerFinalizer},
},
Spec: placementv1beta1.ClusterResourcePlacementSpec{
ResourceSelectors: workResourceSelector(),
Strategy: placementv1beta1.RolloutStrategy{
Type: placementv1beta1.RollingUpdateRolloutStrategyType,
RollingUpdate: &placementv1beta1.RollingUpdateConfig{
UnavailablePeriodSeconds: pointer.Int(2),
},
},
Policy: &placementv1beta1.PlacementPolicy{
PlacementType: placementv1beta1.PickFixedPlacementType,
ClusterNames: []string{
memberCluster1Name,
},
},
},
}
Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
})

It("should place resources on specified clusters", func() {
resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1)
Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
})

It("should update CRP status as expected", func() {
crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster1Name}, nil)
Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})

Review comment on lines +56 to +65 (Contributor): the check order should be reversed.
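A minimal sketch of that reordering (an editorial illustration, not part of this PR's diff; it reuses the helpers already defined in this file and relies on the Ordered context so the specs still run in sequence):

// Sketch: the same two checks as above, with the CRP status assertion first.
It("should update CRP status as expected", func() {
	crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster1Name}, nil)
	Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})

It("should place resources on specified clusters", func() {
	resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1)
	Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
})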

AfterAll(func() {
ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster1})
})
})

Context("refreshing target clusters", Ordered, func() {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())

BeforeAll(func() {
// Create the resources.
createWorkResources()

// Create the CRP.
crp := &placementv1beta1.ClusterResourcePlacement{
ObjectMeta: metav1.ObjectMeta{
Name: crpName,
// Add a custom finalizer; this would allow us to better observe
// the behavior of the controllers.
Finalizers: []string{customDeletionBlockerFinalizer},
},
Spec: placementv1beta1.ClusterResourcePlacementSpec{
ResourceSelectors: workResourceSelector(),
Strategy: placementv1beta1.RolloutStrategy{
Type: placementv1beta1.RollingUpdateRolloutStrategyType,
RollingUpdate: &placementv1beta1.RollingUpdateConfig{
UnavailablePeriodSeconds: pointer.Int(2),
},
},
Policy: &placementv1beta1.PlacementPolicy{
PlacementType: placementv1beta1.PickFixedPlacementType,
ClusterNames: []string{
memberCluster1Name,
},
},
},
}
Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")

// Verify that resources are placed on specified clusters.
resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster1)
Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")

// Update the CRP to pick a different cluster.
Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp)).To(Succeed(), "Failed to get CRP")
crp.Spec.Policy.ClusterNames = []string{memberCluster2Name}
Expect(hubClient.Update(ctx, crp)).To(Succeed(), "Failed to update CRP")
Review comment on lines +107 to +111 (Contributor): I wonder what logic should be in BeforeAll vs It? I don't really see a clear pattern. I would get rid of the BeforeAll, or keep it to only the initial setup.
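One possible shape for that suggestion (an editorial sketch, not part of this PR's diff): keep BeforeAll for the initial setup only and move the policy update into its own spec in the Ordered context, for example:

It("can update the CRP to pick a different cluster", func() {
	// Re-fetch the CRP rather than reusing the object created in BeforeAll.
	crp := &placementv1beta1.ClusterResourcePlacement{}
	Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp)).To(Succeed(), "Failed to get CRP")
	crp.Spec.Policy.ClusterNames = []string{memberCluster2Name}
	Expect(hubClient.Update(ctx, crp)).To(Succeed(), "Failed to update CRP")
})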

})

It("should place resources on newly specified clusters", func() {
resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(memberCluster2)
Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on specified clusters")
})

It("should remove resources from previously specified clusters", func() {
resourceRemovedActual := workNamespaceRemovedFromClusterActual(memberCluster1)
Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove resources from previously specified clusters")
})

It("should update CRP status as expected", func() {
crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2Name}, nil)
Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})
Review comment on lines +124 to +127 (Contributor): I think most users will check the CRP status first.


AfterAll(func() {
ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{memberCluster2})
})
})

Context("pick unhealthy and non-existent clusters", Ordered, func() {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())

BeforeAll(func() {
// Create the resources.
createWorkResources()

// Create the CRP.
crp := &placementv1beta1.ClusterResourcePlacement{
ObjectMeta: metav1.ObjectMeta{
Name: crpName,
// Add a custom finalizer; this would allow us to better observe
// the behavior of the controllers.
Finalizers: []string{customDeletionBlockerFinalizer},
},
Spec: placementv1beta1.ClusterResourcePlacementSpec{
ResourceSelectors: workResourceSelector(),
Strategy: placementv1beta1.RolloutStrategy{
Type: placementv1beta1.RollingUpdateRolloutStrategyType,
RollingUpdate: &placementv1beta1.RollingUpdateConfig{
UnavailablePeriodSeconds: pointer.Int(2),
},
},
Policy: &placementv1beta1.PlacementPolicy{
PlacementType: placementv1beta1.PickFixedPlacementType,
ClusterNames: []string{
memberCluster4Name,
memberCluster5Name,
memberCluster6Name,
Review comment on lines +159 to +162 (Contributor): I guess some of these are unhealthy clusters; it would be great if their names reflected that, to improve the test's readability.

},
},
},
}
Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
})

It("should update CRP status as expected", func() {
crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), nil, []string{memberCluster4Name, memberCluster5Name, memberCluster6Name})
Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})

AfterAll(func() {
ensureCRPAndRelatedResourcesDeletion(crpName, []*framework.Cluster{})
})
})
})