Reduced PR size
michaelawyu committed Oct 7, 2023
1 parent ed37913 commit 13ce805
Showing 9 changed files with 518 additions and 176 deletions.
1 change: 1 addition & 0 deletions pkg/controllers/clusterresourceplacement/controller.go
@@ -915,6 +915,7 @@ func (r *Reconciler) setResourcePlacementStatusAndResourceConditions(ctx context
// TODO: we could improve the message by summarizing the failure reasons from all of the unselected clusters.
// For now, it starts by adding some sample failures of unselected clusters.
var rp fleetv1beta1.ResourcePlacementStatus
rp.ClusterName = unselected[i].ClusterName
scheduledCondition := metav1.Condition{
Status: metav1.ConditionFalse,
Type: string(fleetv1beta1.ResourceScheduledConditionType),
@@ -780,6 +780,7 @@ func TestSetPlacementStatus(t *testing.T) {
FailedResourcePlacements: []fleetv1beta1.FailedResourcePlacement{},
},
{
ClusterName: "member-2",
Conditions: []metav1.Condition{
{
Status: metav1.ConditionFalse,
@@ -790,6 +791,7 @@ func TestSetPlacementStatus(t *testing.T) {
},
},
{
ClusterName: "member-3",
Conditions: []metav1.Condition{
{
Status: metav1.ConditionFalse,
121 changes: 81 additions & 40 deletions test/e2e/actuals_test.go
@@ -60,7 +60,7 @@ func validateConfigMapOnCluster(cluster *framework.Cluster, name types.Namespace
ignoreObjectMetaAutoGeneratedFields,
ignoreObjectMetaAnnotationField,
); diff != "" {
return fmt.Errorf("app deployment diff (-got, +want): %s", diff)
return fmt.Errorf("app config map diff (-got, +want): %s", diff)
}

return nil
@@ -79,6 +79,26 @@ func workNamespaceAndConfigMapPlacedOnClusterActual(cluster *framework.Cluster)
}
}

func crpRolloutFailedConditions(generation int64) []metav1.Condition {
return []metav1.Condition{
{
Type: string(placementv1beta1.ClusterResourcePlacementScheduledConditionType),
Status: metav1.ConditionFalse,
ObservedGeneration: generation,
},
{
Type: string(placementv1beta1.ClusterResourcePlacementSynchronizedConditionType),
Status: metav1.ConditionTrue,
ObservedGeneration: generation,
},
{
Type: string(placementv1beta1.ClusterResourcePlacementAppliedConditionType),
Status: metav1.ConditionTrue,
ObservedGeneration: generation,
},
}
}

func crpRolloutCompletedConditions(generation int64) []metav1.Condition {
return []metav1.Condition{
{
@@ -119,58 +139,79 @@ func resourcePlacementRolloutCompletedConditions(generation int64) []metav1.Cond
}
}

func validateCRPStatus(name types.NamespacedName, wantSelectedResources []placementv1beta1.ResourceIdentifier) error {
crp := &placementv1beta1.ClusterResourcePlacement{}
if err := hubClient.Get(ctx, name, crp); err != nil {
return err
}

wantCRPConditions := crpRolloutCompletedConditions(crp.Generation)
wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{
func resourcePlacementRolloutFailedConditions(generation int64) []metav1.Condition {
return []metav1.Condition{
{
ClusterName: memberCluster1Name,
Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation),
Type: string(placementv1beta1.ResourceScheduledConditionType),
Status: metav1.ConditionFalse,
ObservedGeneration: generation,
},
}
}

func workResourceIdentifiers() []placementv1beta1.ResourceIdentifier {
workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess())

return []placementv1beta1.ResourceIdentifier{
{
ClusterName: memberCluster2Name,
Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation),
Kind: "Namespace",
Name: workNamespaceName,
Version: "v1",
},
{
ClusterName: memberCluster3Name,
Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation),
Kind: "ConfigMap",
Name: appConfigMapName,
Version: "v1",
Namespace: workNamespaceName,
},
}
wantStatus := placementv1beta1.ClusterResourcePlacementStatus{
Conditions: wantCRPConditions,
PlacementStatuses: wantPlacementStatus,
SelectedResources: wantSelectedResources,
}
if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" {
return fmt.Errorf("CRP status diff (-got, +want): %s", diff)
}
return nil
}

func crpStatusUpdatedActual() func() error {
func crpStatusUpdatedActual(
wantSelectedResourceIdentifiers []placementv1beta1.ResourceIdentifier,
wantSelectedClusters, wantUnselectedClusters []string,
) func() error {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess())
appConfigMapName := fmt.Sprintf(appConfigMapNameTemplate, GinkgoParallelProcess())

return func() error {
wantSelectedResources := []placementv1beta1.ResourceIdentifier{
{
Kind: "Namespace",
Name: workNamespaceName,
Version: "v1",
},
{
Kind: "ConfigMap",
Name: appConfigMapName,
Version: "v1",
Namespace: workNamespaceName,
},
crp := &placementv1beta1.ClusterResourcePlacement{}
if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil {
return err
}

wantPlacementStatus := []placementv1beta1.ResourcePlacementStatus{}
for _, name := range wantSelectedClusters {
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
ClusterName: name,
Conditions: resourcePlacementRolloutCompletedConditions(crp.Generation),
})
}
for _, name := range wantUnselectedClusters {
wantPlacementStatus = append(wantPlacementStatus, placementv1beta1.ResourcePlacementStatus{
ClusterName: name,
Conditions: resourcePlacementRolloutFailedConditions(crp.Generation),
})
}
return validateCRPStatus(types.NamespacedName{Name: crpName}, wantSelectedResources)

wantCRPConditions := crpRolloutCompletedConditions(crp.Generation)
if len(wantUnselectedClusters) > 0 {
wantCRPConditions = crpRolloutFailedConditions(crp.Generation)
}

// Note that the CRP controller will only keep decisions regarding unselected clusters for a CRP if:
//
// * The CRP is of the PickN placement type and the required N count cannot be fulfilled; or
// * The CRP is of the PickFixed placement type and the list of target clusters specified cannot be fulfilled.
wantStatus := placementv1beta1.ClusterResourcePlacementStatus{
Conditions: wantCRPConditions,
PlacementStatuses: wantPlacementStatus,
SelectedResources: wantSelectedResourceIdentifiers,
}
if diff := cmp.Diff(crp.Status, wantStatus, crpStatusCmpOptions...); diff != "" {
return fmt.Errorf("CRP status diff (-got, +want): %s", diff)
}
return nil
}
}
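
For reference, a minimal usage sketch (not part of this commit) of the refactored crpStatusUpdatedActual helper for a case where some requested clusters cannot be scheduled; the unselected cluster name is a made-up placeholder, while the other identifiers come from the existing e2e suite:

// Hypothetical invocation; "cluster-that-does-not-exist" is a placeholder.
It("should update CRP status as expected", func() {
	statusUpdatedActual := crpStatusUpdatedActual(
		workResourceIdentifiers(),
		[]string{memberCluster1Name, memberCluster2Name}, // clusters expected to be selected
		[]string{"cluster-that-does-not-exist"},          // clusters that cannot be fulfilled
	)
	Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})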

57 changes: 57 additions & 0 deletions test/e2e/placement_pickall_test.go
@@ -0,0 +1,57 @@
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
*/

package e2e

import (
"fmt"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"

placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

var _ = Describe("placing resources using a CRP with no placement policy specified", Ordered, func() {
crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())

BeforeAll(func() {
// Create the resources.
createWorkResources()

// Create the CRP.
crp := &placementv1beta1.ClusterResourcePlacement{
ObjectMeta: metav1.ObjectMeta{
Name: crpName,
// Add a custom finalizer; this would allow us to better observe
// the behavior of the controllers.
Finalizers: []string{customDeletionBlockerFinalizer},
},
Spec: placementv1beta1.ClusterResourcePlacementSpec{
ResourceSelectors: workResourceSelector(),
Strategy: placementv1beta1.RolloutStrategy{
Type: placementv1beta1.RollingUpdateRolloutStrategyType,
RollingUpdate: &placementv1beta1.RollingUpdateConfig{
UnavailablePeriodSeconds: pointer.Int(2),
},
},
},
}
Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")
})

It("should place the resources on all member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)

It("should update CRP status as expected", func() {
crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil)
Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected")
})

AfterAll(func() {
ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
})
})
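
As a complement to the test above, which places resources on all member clusters, here is a hedged sketch of a CRP spec that would drive the unselected-clusters path covered by the new failed-condition helpers; the Policy, PlacementType, and ClusterNames fields and the PickFixedPlacementType constant are assumptions about the v1beta1 placement API, not code from this commit:

// Hypothetical CRP spec; the placement policy fields below are assumed. One
// valid member cluster is listed alongside a cluster that does not exist, so
// the fixed list of targets cannot be fulfilled and the scheduler is expected
// to report the missing cluster as unselected.
crp := &placementv1beta1.ClusterResourcePlacement{
	ObjectMeta: metav1.ObjectMeta{
		Name:       crpName,
		Finalizers: []string{customDeletionBlockerFinalizer},
	},
	Spec: placementv1beta1.ClusterResourcePlacementSpec{
		ResourceSelectors: workResourceSelector(),
		Policy: &placementv1beta1.PlacementPolicy{
			PlacementType: placementv1beta1.PickFixedPlacementType,
			ClusterNames:  []string{memberCluster1Name, "cluster-that-does-not-exist"},
		},
		Strategy: placementv1beta1.RolloutStrategy{
			Type: placementv1beta1.RollingUpdateRolloutStrategyType,
			RollingUpdate: &placementv1beta1.RollingUpdateConfig{
				UnavailablePeriodSeconds: pointer.Int(2),
			},
		},
	},
}
Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP")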